Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig  82
-rw-r--r--  drivers/cpufreq/Kconfig.arm  115
-rw-r--r--  drivers/cpufreq/Kconfig.powerpc  25
-rw-r--r--  drivers/cpufreq/Kconfig.x86  13
-rw-r--r--  drivers/cpufreq/Makefile  10
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c  98
-rw-r--r--  drivers/cpufreq/airoha-cpufreq.c  153
-rw-r--r--  drivers/cpufreq/amd-pstate-trace.h  75
-rw-r--r--  drivers/cpufreq/amd-pstate-ut.c  259
-rw-r--r--  drivers/cpufreq/amd-pstate.c  1629
-rw-r--r--  drivers/cpufreq/amd-pstate.h  127
-rw-r--r--  drivers/cpufreq/amd_freq_sensitivity.c  2
-rw-r--r--  drivers/cpufreq/apple-soc-cpufreq.c  90
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c  14
-rw-r--r--  drivers/cpufreq/armada-8k-cpufreq.c  11
-rw-r--r--  drivers/cpufreq/bmips-cpufreq.c  5
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c  26
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c  525
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c  49
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c  71
-rw-r--r--  drivers/cpufreq/cpufreq-dt.h  2
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c  9
-rw-r--r--  drivers/cpufreq/cpufreq.c  797
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  25
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c  49
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  29
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.h  23
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c  17
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c  77
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c  5
-rw-r--r--  drivers/cpufreq/e_powersaver.c  10
-rw-r--r--  drivers/cpufreq/elanfreq.c  2
-rw-r--r--  drivers/cpufreq/freq_table.c  57
-rw-r--r--  drivers/cpufreq/ia64-acpi-cpufreq.c  353
-rw-r--r--  drivers/cpufreq/imx-cpufreq-dt.c  4
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c  54
-rw-r--r--  drivers/cpufreq/intel_pstate.c  1342
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c  5
-rw-r--r--  drivers/cpufreq/longhaul.c  33
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c  11
-rw-r--r--  drivers/cpufreq/loongson3_cpufreq.c  389
-rw-r--r--  drivers/cpufreq/maple-cpufreq.c  241
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq-hw.c  169
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c  120
-rw-r--r--  drivers/cpufreq/mvebu-cpufreq.c  2
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c  11
-rw-r--r--  drivers/cpufreq/p4-clockmod.c  1
-rw-r--r--  drivers/cpufreq/pasemi-cpufreq.c  8
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c  12
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c  16
-rw-r--r--  drivers/cpufreq/pmac64-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/powernow-k6.c  6
-rw-r--r--  drivers/cpufreq/powernow-k7.c  18
-rw-r--r--  drivers/cpufreq/powernow-k8.c  12
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c  32
-rw-r--r--  drivers/cpufreq/powernv-trace.h  44
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.c  173
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.h  33
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c  102
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pmi.c  151
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c  65
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c  383
-rw-r--r--  drivers/cpufreq/qoriq-cpufreq.c  9
-rw-r--r--  drivers/cpufreq/raspberrypi-cpufreq.c  4
-rw-r--r--  drivers/cpufreq/rcpufreq_dt.rs  222
-rw-r--r--  drivers/cpufreq/s3c64xx-cpufreq.c  11
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c  10
-rw-r--r--  drivers/cpufreq/sc520_freq.c  2
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c  203
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c  32
-rw-r--r--  drivers/cpufreq/sh-cpufreq.c  11
-rw-r--r--  drivers/cpufreq/sparc-us2e-cpufreq.c  5
-rw-r--r--  drivers/cpufreq/sparc-us3-cpufreq.c  5
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c  21
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c  19
-rw-r--r--  drivers/cpufreq/speedstep-ich.c  1
-rw-r--r--  drivers/cpufreq/speedstep-lib.c  12
-rw-r--r--  drivers/cpufreq/speedstep-lib.h  10
-rw-r--r--  drivers/cpufreq/speedstep-smi.c  1
-rw-r--r--  drivers/cpufreq/sti-cpufreq.c  7
-rw-r--r--  drivers/cpufreq/sun50i-cpufreq-nvmem.c  280
-rw-r--r--  drivers/cpufreq/tegra124-cpufreq.c  68
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c  197
-rw-r--r--  drivers/cpufreq/tegra194-cpufreq.c  188
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c  144
-rw-r--r--  drivers/cpufreq/vexpress-spc-cpufreq.c  10
-rw-r--r--  drivers/cpufreq/virtual-cpufreq.c  332
87 files changed, 6091 insertions, 3988 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index f429b9b37b76..78702a08364f 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -217,8 +217,34 @@ config CPUFREQ_DT
If in doubt, say N.
+config CPUFREQ_DT_RUST
+ tristate "Rust based Generic DT based cpufreq driver"
+ depends on HAVE_CLK && OF && RUST
+ select CPUFREQ_DT_PLATDEV
+ select PM_OPP
+ help
+ This adds a Rust based, generic DT cpufreq driver for frequency
+ management. It supports both uniprocessor (UP) and symmetric
+ multiprocessor (SMP) systems.
+
+ If in doubt, say N.
+
+config CPUFREQ_VIRT
+ tristate "Virtual cpufreq driver"
+ depends on GENERIC_ARCH_TOPOLOGY
+ help
+ This adds a virtualized cpufreq driver for guest kernels that
+ reads/writes an MMIO region of a virtualized cpufreq device to
+ communicate with the host. It sends performance requests to the
+ host, which uses them as a hint to schedule vCPU threads and select
+ CPU frequency. If a VM does not support a virtualized FIE such as
+ AMUs, the driver updates the frequency scaling factor by polling
+ host CPU frequency to enable accurate Per-Entity Load Tracking for
+ tasks running in the guest.
+
+ If in doubt, say N.
+
config CPUFREQ_DT_PLATDEV
- tristate "Generic DT based cpufreq platdev driver"
+ bool "Generic DT based cpufreq platdev driver"
depends on OF
help
This adds a generic DT based cpufreq platdev driver for frequency
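
As a point of reference for the CPUFREQ_VIRT help text above, here is a minimal userspace-style sketch of the frequency scaling factor it describes, assuming the kernel's usual SCHED_CAPACITY_SHIFT fixed-point convention (function and variable names are illustrative, not the driver's actual API):

#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10	/* scale of 1024 == running at max frequency */

/*
 * Illustrative only: derive the scale factor used by Per-Entity Load
 * Tracking from a polled current frequency and the maximum frequency,
 * both in kHz.
 */
static uint64_t freq_scale(uint64_t cur_khz, uint64_t max_khz)
{
	return (cur_khz << SCHED_CAPACITY_SHIFT) / max_khz;
}

/* Example: 1400000 kHz polled out of a 2800000 kHz max gives a scale of 512. */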
@@ -231,25 +257,12 @@ if X86
source "drivers/cpufreq/Kconfig.x86"
endif
-if ARM || ARM64
source "drivers/cpufreq/Kconfig.arm"
-endif
if PPC32 || PPC64
source "drivers/cpufreq/Kconfig.powerpc"
endif
-if IA64
-config IA64_ACPI_CPUFREQ
- tristate "ACPI Processor P-States driver"
- depends on ACPI_PROCESSOR
- help
- This driver adds a CPUFreq driver which utilizes the ACPI
- Processor Performance States.
-
- If in doubt, say N.
-endif
-
if MIPS
config BMIPS_CPUFREQ
tristate "BMIPS CPUfreq Driver"
@@ -273,6 +286,18 @@ config LOONGSON2_CPUFREQ
If in doubt, say N.
endif
+if LOONGARCH
+config LOONGSON3_CPUFREQ
+ tristate "Loongson3 CPUFreq Driver"
+ help
+ This option adds a CPUFreq driver for Loongson processors which
+ support software-configurable CPU frequency.
+
+ Loongson-3 family processors support this feature.
+
+ If in doubt, say N.
+endif
+
if SPARC64
config SPARC_US3_CPUFREQ
tristate "UltraSPARC-III CPU Frequency driver"
@@ -312,5 +337,34 @@ config QORIQ_CPUFREQ
This adds the CPUFreq driver support for Freescale QorIQ SoCs
which are capable of changing the CPU's frequency dynamically.
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ depends on ARM || ARM64 || RISCV
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPIv5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ACPI_CPPC_CPUFREQ_FIE
+ bool "Frequency Invariance support for CPPC cpufreq driver"
+ depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
+ depends on ARM || ARM64 || RISCV
+ default y
+ help
+ This extends frequency invariance support in the CPPC cpufreq driver,
+ by using CPPC delivered and reference performance counters.
+
+ If in doubt, say N.
+
endif
+
endmenu
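
The ACPI_CPPC_CPUFREQ_FIE entry above derives frequency invariance from the CPPC "delivered" and "reference" performance counters. A hedged sketch of the underlying ratio, assuming the mainline approach of scaling delivered performance against highest_perf (names are illustrative; the real driver samples the counters via the CPPC library and hands the result to the arch topology code):

#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10

/*
 * Illustrative only: the delivered/reference counter deltas over a
 * sample window give the average performance level actually delivered.
 */
static uint64_t cppc_freq_scale(uint64_t delivered_delta, uint64_t reference_delta,
				uint64_t reference_perf, uint64_t highest_perf)
{
	uint64_t delivered_perf = (reference_perf * delivered_delta) / reference_delta;

	return (delivered_perf << SCHED_CAPACITY_SHIFT) / highest_perf;
}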
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 123b4bbfcfee..9be0503df55a 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,35 +3,9 @@
# ARM CPU Frequency scaling drivers
#
-config ACPI_CPPC_CPUFREQ
- tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI_PROCESSOR
- select ACPI_CPPC_LIB
- help
- This adds a CPUFreq driver which uses CPPC methods
- as described in the ACPIv5.1 spec. CPPC stands for
- Collaborative Processor Performance Controls. It
- is based on an abstract continuous scale of CPU
- performance values which allows the remote power
- processor to flexibly optimize for power and
- performance. CPPC relies on power management firmware
- support for its operation.
-
- If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ_FIE
- bool "Frequency Invariance support for CPPC cpufreq driver"
- depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
- default y
- help
- This extends frequency invariance support in the CPPC cpufreq driver,
- by using CPPC delivered and reference performance counters.
-
- If in doubt, say N.
-
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
depends on NVMEM_SUNXI_SID
select PM_OPP
help
@@ -41,26 +15,36 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
To compile this driver as a module, choose M here: the
module will be called sun50i-cpufreq-nvmem.
+config ARM_AIROHA_SOC_CPUFREQ
+ tristate "Airoha EN7581 SoC CPUFreq support"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ depends on OF
+ select PM_OPP
+ default ARCH_AIROHA
+ help
+ This adds the CPUFreq driver for Airoha EN7581 SoCs.
+
config ARM_APPLE_SOC_CPUFREQ
tristate "Apple Silicon SoC CPUFreq support"
depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
select PM_OPP
- default ARCH_APPLE
help
This adds the CPUFreq driver for Apple Silicon machines
(e.g. Apple M1).
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
- depends on ARCH_MVEBU && CPUFREQ_DT
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on CPUFREQ_DT
help
This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
The Armada 37xx PMU supports 4 frequency and VDD levels.
config ARM_ARMADA_8K_CPUFREQ
tristate "Armada 8K CPUFreq driver"
- depends on ARCH_MVEBU && CPUFREQ_DT
- select ARMADA_AP_CPU_CLK
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on CPUFREQ_DT
+ select ARMADA_AP_CPU_CLK if COMMON_CLK
help
This enables the CPUFreq driver support for Marvell
Armada8k SOCs.
@@ -82,7 +66,7 @@ config ARM_SCPI_CPUFREQ
config ARM_VEXPRESS_SPC_CPUFREQ
tristate "Versatile Express SPC based CPUfreq driver"
depends on ARM_CPU_TOPOLOGY && HAVE_CLK
- depends on ARCH_VEXPRESS_SPC
+ depends on ARCH_VEXPRESS_SPC || COMPILE_TEST
select PM_OPP
help
This adds the CPUfreq driver support for Versatile Express
@@ -90,8 +74,8 @@ config ARM_VEXPRESS_SPC_CPUFREQ
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
- depends on ARCH_BRCMSTB || COMPILE_TEST
- default y
+ depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
+ default y if ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ
help
Some Broadcom STB SoCs use a co-processor running proprietary firmware
("AVS") to handle voltage and frequency scaling. This driver provides
@@ -101,8 +85,9 @@ config ARM_BRCMSTB_AVS_CPUFREQ
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
- depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
- default m
+ depends on ARCH_HIGHBANK || COMPILE_TEST
+ depends on CPUFREQ_DT && REGULATOR && PL320_MBOX
+ default m if ARCH_HIGHBANK
help
This adds the CPUFreq driver for Calxeda Highbank SoC
based boards.
@@ -122,10 +107,11 @@ config ARM_IMX6Q_CPUFREQ
config ARM_IMX_CPUFREQ_DT
tristate "Freescale i.MX8M cpufreq support"
- depends on ARCH_MXC && CPUFREQ_DT
+ depends on CPUFREQ_DT
+ depends on ARCH_MXC || COMPILE_TEST
help
- This adds cpufreq driver support for Freescale i.MX8M series SoCs,
- based on cpufreq-dt.
+ This adds cpufreq driver support for Freescale i.MX7/i.MX8M
+ series SoCs, based on cpufreq-dt.
If in doubt, say N.
@@ -137,7 +123,8 @@ config ARM_KIRKWOOD_CPUFREQ
config ARM_MEDIATEK_CPUFREQ
tristate "CPU Frequency scaling support for MediaTek SoCs"
- depends on ARCH_MEDIATEK && REGULATOR
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on REGULATOR
select PM_OPP
help
This adds the CPUFreq driver support for MediaTek SoCs.
@@ -145,7 +132,7 @@ config ARM_MEDIATEK_CPUFREQ
config ARM_MEDIATEK_CPUFREQ_HW
tristate "MediaTek CPUFreq HW driver"
depends on ARCH_MEDIATEK || COMPILE_TEST
- default m
+ default m if ARCH_MEDIATEK
help
Support for the CPUFreq HW driver.
Some MediaTek chipsets have a HW engine to offload the steps
@@ -156,12 +143,12 @@ config ARM_MEDIATEK_CPUFREQ_HW
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
default ARCH_OMAP2PLUS
config ARM_QCOM_CPUFREQ_NVMEM
tristate "Qualcomm nvmem based CPUFreq"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
depends on NVMEM_QCOM_QFPROM
depends on QCOM_SMEM
select PM_OPP
@@ -173,6 +160,7 @@ config ARM_QCOM_CPUFREQ_NVMEM
config ARM_QCOM_CPUFREQ_HW
tristate "QCOM CPUFreq HW driver"
depends on ARCH_QCOM || COMPILE_TEST
+ depends on COMMON_CLK
help
Support for the CPUFreq HW driver.
Some QCOM chipsets have a HW engine to offload the steps
@@ -191,8 +179,8 @@ config ARM_RASPBERRYPI_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
- depends on CPU_S3C6410
- default y
+ depends on CPU_S3C6410 || COMPILE_TEST
+ default CPU_S3C6410
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -200,8 +188,8 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
- depends on CPU_S5PV210
- default y
+ depends on CPU_S5PV210 || COMPILE_TEST
+ default CPU_S5PV210
help
This adds the CPUFreq driver for Samsung S5PV210 and
S5PC110 SoCs.
@@ -224,14 +212,15 @@ config ARM_SCMI_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
- depends on PLAT_SPEAR
- default y
+ depends on PLAT_SPEAR || COMPILE_TEST
+ default PLAT_SPEAR
help
This adds the CPUFreq driver support for SPEAr SOCs.
config ARM_STI_CPUFREQ
tristate "STi CPUFreq support"
- depends on CPUFREQ_DT && SOC_STIH407
+ depends on CPUFREQ_DT
+ depends on SOC_STIH407 || COMPILE_TEST
help
This driver uses the generic OPP framework to match the running
platform with a predefined set of suitable values. If not provided
@@ -241,35 +230,39 @@ config ARM_STI_CPUFREQ
config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
- depends on ARCH_TEGRA && CPUFREQ_DT
- default y
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on CPUFREQ_DT
+ default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra20/30 SOCs.
config ARM_TEGRA124_CPUFREQ
- bool "Tegra124 CPUFreq support"
- depends on ARCH_TEGRA && CPUFREQ_DT
- default y
+ tristate "Tegra124 CPUFreq support"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on CPUFREQ_DT
+ default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra124 SOCs.
config ARM_TEGRA186_CPUFREQ
tristate "Tegra186 CPUFreq support"
- depends on ARCH_TEGRA && TEGRA_BPMP
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on TEGRA_BPMP
help
This adds the CPUFreq driver support for Tegra186 SOCs.
config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
- depends on ARCH_TEGRA_194_SOC && TEGRA_BPMP
- default y
+ depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
+ depends on TEGRA_BPMP
+ default ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
help
This adds CPU frequency driver support for Tegra194 SOCs.
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
- depends on ARCH_OMAP2PLUS || ARCH_K3
- default y
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
+ default ARCH_OMAP2PLUS || ARCH_K3
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to
@@ -280,7 +273,7 @@ config ARM_TI_CPUFREQ
config ARM_PXA2xx_CPUFREQ
tristate "Intel PXA2xx CPUfreq driver"
- depends on PXA27x || PXA25x
+ depends on PXA27x || PXA25x || COMPILE_TEST
help
This adds the CPUFreq driver support for Intel PXA2xx SOCs.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 58151ca56695..551e65d35a1d 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,29 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config CPU_FREQ_CBE
- tristate "CBE frequency scaling"
- depends on CBE_RAS && PPC_CELL
- default m
- help
- This adds the cpufreq driver for Cell BE processors.
- For details, take a look at <file:Documentation/cpu-freq/>.
- If you don't have such processor, say N
-
-config CPU_FREQ_CBE_PMI
- bool "CBE frequency scaling using PMI interface"
- depends on CPU_FREQ_CBE
- default n
- help
- Select this, if you want to use the PMI interface to switch
- frequencies. Using PMI, the processor will not only be able to run at
- lower speed, but also at lower core voltage.
-
-config CPU_FREQ_MAPLE
- bool "Support for Maple 970FX Evaluation Board"
- depends on PPC_MAPLE
- help
- This adds support for frequency switching on Maple 970FX
- Evaluation Board and compatible boards (IBM JS2x blades).
-
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 438c9e75a04d..2c5c228408bf 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -71,6 +71,7 @@ config X86_AMD_PSTATE_DEFAULT_MODE
config X86_AMD_PSTATE_UT
tristate "selftest for AMD Processor P-State driver"
depends on X86 && ACPI_PROCESSOR
+ depends on X86_AMD_PSTATE
default n
help
This kernel module is used for testing. It's safe to say M here.
@@ -339,3 +340,15 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK
option lets the probing code bypass some of those checks if the
parameter "relaxed_check=1" is passed to the module.
+config CPUFREQ_ARCH_CUR_FREQ
+ default y
+ bool "Current frequency derived from HW provided feedback"
+ help
+ This determines whether the scaling_cur_freq sysfs attribute returns
+ the last requested frequency or a more precise value based on hardware
+ provided feedback (such as architected counters).
+ Given that a more precise frequency can now be provided via the
+ cpuinfo_avg_freq attribute, enabling this option keeps scaling_cur_freq
+ reporting the counter based frequency, for compatibility reasons.
+
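
For context on the "HW provided feedback" wording in CPUFREQ_ARCH_CUR_FREQ above: on x86 the counter based frequency comes from the APERF/MPERF ratio. A simplified sketch of that arithmetic (illustrative names; the in-kernel version lives in arch/x86/kernel/cpu/aperfmperf.c):

#include <stdint.h>

/*
 * Illustrative only: APERF counts cycles at the actual clock, MPERF at
 * the guaranteed (base) clock, so over any sampling window:
 *
 *	avg_freq = base_freq * (APERF delta / MPERF delta)
 */
static uint64_t avg_freq_khz(uint64_t base_khz, uint64_t aperf_delta,
			     uint64_t mperf_delta)
{
	return (base_khz * aperf_delta) / mperf_delta;
}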
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ef8510774913..681d687b5a18 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -15,10 +15,13 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_RUST) += rcpufreq_dt.o
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
+obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
# Traces
CFLAGS_amd-pstate-trace.o := -I$(src)
+CFLAGS_powernv-cpufreq.o := -I$(src)
amd_pstate-y := amd-pstate.o amd-pstate-trace.o
##################################################################################
@@ -52,6 +55,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_AIROHA_SOC_CPUFREQ) += airoha-cpufreq.o
obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
@@ -89,10 +93,6 @@ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
##################################################################################
# PowerPC platform drivers
-obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
-ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
-obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
-obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
@@ -102,8 +102,8 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
##################################################################################
# Other platform drivers
obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
-obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
+obj-$(CONFIG_LOONGSON3_CPUFREQ) += loongson3_cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
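
The new CFLAGS_powernv-cpufreq.o := -I$(src) line mirrors the existing amd-pstate one: the tracepoint machinery's define_trace.h re-includes the trace header by file name, so the object that instantiates the events needs the driver directory on its include path. A minimal sketch of the usual wiring, assuming the standard TRACE_EVENT pattern:

/* powernv-cpufreq.c (sketch): exactly one .c file instantiates the events */
#define CREATE_TRACE_POINTS
#include "powernv-trace.h"	/* re-included via -I$(src) by define_trace.h */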
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index b2f05d27167e..e73a66785d69 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -50,8 +50,6 @@ enum {
#define AMD_MSR_RANGE (0x7)
#define HYGON_MSR_RANGE (0x7)
-#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
-
struct acpi_cpufreq_data {
unsigned int resume;
unsigned int cpu_feature;
@@ -75,20 +73,17 @@ static unsigned int acpi_pstate_strict;
static bool boost_state(unsigned int cpu)
{
- u32 lo, hi;
u64 msr;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
- rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
- rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
@@ -115,14 +110,14 @@ static int boost_set_msr(bool enable)
return -EINVAL;
}
- rdmsrl(msr_addr, val);
+ rdmsrq(msr_addr, val);
if (enable)
val &= ~msr_mask;
else
val |= msr_mask;
- wrmsrl(msr_addr, val);
+ wrmsrq(msr_addr, val);
return 0;
}
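
The rdmsr_on_cpu() to rdmsrq_on_cpu() conversions above replace an open-coded pair read plus merge with a single 64-bit read. Both helpers are real x86 MSR accessors; this sketch just spells out the equivalence the diff relies on:

	u32 lo, hi;
	u64 msr;

	rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);	/* old: two 32-bit halves */
	msr = lo | ((u64)hi << 32);			/* manual merge */

	rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr);		/* new: one 64-bit read */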
@@ -323,7 +318,6 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
@@ -340,14 +334,8 @@ static void drv_write(struct acpi_cpufreq_data *data,
.val = val,
.func.write = data->cpu_freq_write,
};
- int this_cpu;
- this_cpu = get_cpu();
- if (cpumask_test_cpu(this_cpu, mask))
- do_drv_write(&cmd);
-
- smp_call_function_many(mask, do_drv_write, &cmd, 1);
- put_cpu();
+ on_each_cpu_mask(mask, do_drv_write, &cmd, true);
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
@@ -407,7 +395,7 @@ static unsigned int check_freqs(struct cpufreq_policy *policy,
cur_freq = extract_freq(policy, get_cur_val(mask, data));
if (cur_freq == freq)
return 1;
- udelay(10);
+ usleep_range(10, 15);
}
return 0;
}
@@ -628,7 +616,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
-static u64 get_max_boost_ratio(unsigned int cpu)
+/*
+ * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
+ * between the highest_perf and the nominal_perf.
+ *
+ * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal
+ * frequency via @nominal_freq if it is a non-NULL pointer.
+ */
+static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
{
struct cppc_perf_caps perf_caps;
u64 highest_perf, nominal_perf;
@@ -644,13 +639,22 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return 0;
}
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- highest_perf = amd_get_highest_perf();
- else
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ ret = amd_get_boost_ratio_numerator(cpu, &highest_perf);
+ if (ret) {
+ pr_debug("CPU%d: Unable to get boost ratio numerator (%d)\n",
+ cpu, ret);
+ return 0;
+ }
+ } else {
highest_perf = perf_caps.highest_perf;
+ }
nominal_perf = perf_caps.nominal_perf;
+ if (nominal_freq)
+ *nominal_freq = perf_caps.nominal_freq * 1000;
+
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
return 0;
@@ -663,8 +667,12 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
+
#else
-static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
+{
+ return 0;
+}
#endif
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -674,9 +682,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
struct acpi_cpufreq_data *data;
unsigned int cpu = policy->cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ u64 max_boost_ratio, nominal_freq = 0;
unsigned int valid_states = 0;
unsigned int result = 0;
- u64 max_boost_ratio;
unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
@@ -826,16 +834,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
- max_boost_ratio = get_max_boost_ratio(cpu);
+ max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
if (max_boost_ratio) {
- unsigned int freq = freq_table[0].frequency;
+ unsigned int freq = nominal_freq;
/*
- * Because the loop above sorts the freq_table entries in the
- * descending order, freq is the maximum frequency in the table.
- * Assume that it corresponds to the CPPC nominal frequency and
- * use it to set cpuinfo.max_freq.
+ * The loop above sorts the freq_table entries in the
+ * descending order. If ACPI CPPC has not advertised
+ * the nominal frequency (this is possible in CPPC
+ * revisions prior to 3), then use the first entry in
+ * the pstate table as a proxy for nominal frequency.
*/
+ if (!freq)
+ freq = freq_table[0].frequency;
+
policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
} else {
/*
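
To make the fixed-point math above concrete: get_max_boost_ratio() returns highest_perf << SCHED_CAPACITY_SHIFT / nominal_perf, and the cpuinfo.max_freq assignment undoes the shift. A worked example with hypothetical CPPC capability values (invented for illustration):

/*
 * Hypothetical: highest_perf = 228, nominal_perf = 166,
 * nominal_freq = 2600000 kHz.
 *
 *	max_boost_ratio  = (228 << 10) / 166		 = 1406
 *	cpuinfo.max_freq = 2600000 * 1406 >> 10		~= 3570312 kHz
 *
 * i.e. a boost ceiling of roughly 1.37x the nominal frequency.
 */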
@@ -890,8 +902,19 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
- if (acpi_cpufreq_driver.set_boost)
- set_boost(policy, acpi_cpufreq_driver.boost_enabled);
+ if (acpi_cpufreq_driver.set_boost) {
+ if (policy->boost_supported) {
+ /*
+ * The firmware may have altered boost state while the
+ * CPU was offline (for example during a suspend-resume
+ * cycle).
+ */
+ if (policy->boost_enabled != boost_state(cpu))
+ set_boost(policy, policy->boost_enabled);
+ } else {
+ policy->boost_supported = true;
+ }
+ }
return result;
@@ -906,7 +929,7 @@ err_free:
return result;
}
-static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
@@ -919,8 +942,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
free_cpumask_var(data->freqdomain_cpus);
kfree(policy->freq_table);
kfree(data);
-
- return 0;
}
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
@@ -935,7 +956,6 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
}
static struct freq_attr *acpi_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
&cpb,
@@ -1011,22 +1031,20 @@ static int __init acpi_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int acpi_cpufreq_remove(struct platform_device *pdev)
+static void acpi_cpufreq_remove(struct platform_device *pdev)
{
pr_debug("%s\n", __func__);
cpufreq_unregister_driver(&acpi_cpufreq_driver);
free_acpi_perf_data();
-
- return 0;
}
static struct platform_driver acpi_cpufreq_platdrv = {
.driver = {
.name = "acpi-cpufreq",
},
- .remove = acpi_cpufreq_remove,
+ .remove = acpi_cpufreq_remove,
};
static int __init acpi_cpufreq_init(void)
diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c
new file mode 100644
index 000000000000..b6b1cdc4d11d
--- /dev/null
+++ b/drivers/cpufreq/airoha-cpufreq.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "cpufreq-dt.h"
+
+struct airoha_cpufreq_priv {
+ int opp_token;
+ struct dev_pm_domain_list *pd_list;
+ struct platform_device *cpufreq_dt;
+};
+
+static struct platform_device *cpufreq_pdev;
+
+/* NOP function to disable OPP from setting clock */
+static int airoha_cpufreq_config_clks_nop(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp,
+ void *data, bool scaling_down)
+{
+ return 0;
+}
+
+static const char * const airoha_cpufreq_clk_names[] = { "cpu", NULL };
+static const char * const airoha_cpufreq_pd_names[] = { "perf" };
+
+static int airoha_cpufreq_probe(struct platform_device *pdev)
+{
+ const struct dev_pm_domain_attach_data attach_data = {
+ .pd_names = airoha_cpufreq_pd_names,
+ .num_pd_names = ARRAY_SIZE(airoha_cpufreq_pd_names),
+ .pd_flags = PD_FLAG_DEV_LINK_ON | PD_FLAG_REQUIRED_OPP,
+ };
+ struct dev_pm_opp_config config = {
+ .clk_names = airoha_cpufreq_clk_names,
+ .config_clks = airoha_cpufreq_config_clks_nop,
+ };
+ struct platform_device *cpufreq_dt;
+ struct airoha_cpufreq_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device *cpu_dev;
+ int ret;
+
+ /* CPUs refer to the same OPP table */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Set OPP table conf with NOP config_clks */
+ priv->opp_token = dev_pm_opp_set_config(cpu_dev, &config);
+ if (priv->opp_token < 0)
+ return dev_err_probe(dev, priv->opp_token, "Failed to set OPP config\n");
+
+ /* Attach PM for OPP */
+ ret = dev_pm_domain_attach_list(cpu_dev, &attach_data,
+ &priv->pd_list);
+ if (ret)
+ goto clear_opp_config;
+
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (ret) {
+ dev_err(dev, "failed to create cpufreq-dt device: %d\n", ret);
+ goto detach_pm;
+ }
+
+ priv->cpufreq_dt = cpufreq_dt;
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+detach_pm:
+ dev_pm_domain_detach_list(priv->pd_list);
+clear_opp_config:
+ dev_pm_opp_clear_config(priv->opp_token);
+
+ return ret;
+}
+
+static void airoha_cpufreq_remove(struct platform_device *pdev)
+{
+ struct airoha_cpufreq_priv *priv = platform_get_drvdata(pdev);
+
+ platform_device_unregister(priv->cpufreq_dt);
+
+ dev_pm_domain_detach_list(priv->pd_list);
+
+ dev_pm_opp_clear_config(priv->opp_token);
+}
+
+static struct platform_driver airoha_cpufreq_driver = {
+ .probe = airoha_cpufreq_probe,
+ .remove = airoha_cpufreq_remove,
+ .driver = {
+ .name = "airoha-cpufreq",
+ },
+};
+
+static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
+ { .compatible = "airoha,an7583" },
+ { .compatible = "airoha,en7581" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, airoha_cpufreq_match_list);
+
+static int __init airoha_cpufreq_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(airoha_cpufreq_match_list, np);
+ of_node_put(np);
+ if (!match)
+ return -ENODEV;
+
+ ret = platform_driver_register(&airoha_cpufreq_driver);
+ if (unlikely(ret < 0))
+ return ret;
+
+ cpufreq_pdev = platform_device_register_data(NULL, "airoha-cpufreq",
+ -1, match, sizeof(*match));
+ ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
+ if (ret)
+ platform_driver_unregister(&airoha_cpufreq_driver);
+
+ return ret;
+}
+module_init(airoha_cpufreq_init);
+
+static void __exit airoha_cpufreq_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&airoha_cpufreq_driver);
+}
+module_exit(airoha_cpufreq_exit);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Airoha SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 35f38ae67fb1..32e1bdc588c5 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -24,15 +24,14 @@
TRACE_EVENT(amd_pstate_perf,
- TP_PROTO(unsigned long min_perf,
- unsigned long target_perf,
- unsigned long capacity,
+ TP_PROTO(u8 min_perf,
+ u8 target_perf,
+ u8 capacity,
u64 freq,
u64 mperf,
u64 aperf,
u64 tsc,
unsigned int cpu_id,
- bool changed,
bool fast_switch
),
@@ -44,20 +43,18 @@ TRACE_EVENT(amd_pstate_perf,
aperf,
tsc,
cpu_id,
- changed,
fast_switch
),
TP_STRUCT__entry(
- __field(unsigned long, min_perf)
- __field(unsigned long, target_perf)
- __field(unsigned long, capacity)
+ __field(u8, min_perf)
+ __field(u8, target_perf)
+ __field(u8, capacity)
__field(unsigned long long, freq)
__field(unsigned long long, mperf)
__field(unsigned long long, aperf)
__field(unsigned long long, tsc)
__field(unsigned int, cpu_id)
- __field(bool, changed)
__field(bool, fast_switch)
),
@@ -70,24 +67,72 @@ TRACE_EVENT(amd_pstate_perf,
__entry->aperf = aperf;
__entry->tsc = tsc;
__entry->cpu_id = cpu_id;
- __entry->changed = changed;
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s",
- (unsigned long)__entry->min_perf,
- (unsigned long)__entry->target_perf,
- (unsigned long)__entry->capacity,
+ TP_printk("amd_min_perf=%hhu amd_des_perf=%hhu amd_max_perf=%hhu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
+ (u8)__entry->min_perf,
+ (u8)__entry->target_perf,
+ (u8)__entry->capacity,
(unsigned long long)__entry->freq,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
(unsigned long long)__entry->tsc,
(unsigned int)__entry->cpu_id,
- (__entry->changed) ? "true" : "false",
(__entry->fast_switch) ? "true" : "false"
)
);
+TRACE_EVENT(amd_pstate_epp_perf,
+
+ TP_PROTO(unsigned int cpu_id,
+ u8 highest_perf,
+ u8 epp,
+ u8 min_perf,
+ u8 max_perf,
+ bool boost,
+ bool changed
+ ),
+
+ TP_ARGS(cpu_id,
+ highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ boost,
+ changed),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu_id)
+ __field(u8, highest_perf)
+ __field(u8, epp)
+ __field(u8, min_perf)
+ __field(u8, max_perf)
+ __field(bool, boost)
+ __field(bool, changed)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->highest_perf = highest_perf;
+ __entry->epp = epp;
+ __entry->min_perf = min_perf;
+ __entry->max_perf = max_perf;
+ __entry->boost = boost;
+ __entry->changed = changed;
+ ),
+
+ TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
+ (unsigned int)__entry->cpu_id,
+ (u8)__entry->min_perf,
+ (u8)__entry->max_perf,
+ (u8)__entry->highest_perf,
+ (u8)__entry->epp,
+ (bool)__entry->boost,
+ (bool)__entry->changed
+ )
+);
+
#endif /* _AMD_PSTATE_TRACE_H */
/* This part must be outside protection */
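
A hedged sketch of how the new amd_pstate_epp_perf event is consumed: the emitting code guards the call with the generated _enabled() helper, as the amd-pstate.c hunks later in this diff do. Assuming the header's TRACE_SYSTEM is amd_cpufreq (as in mainline), the event can then be enabled through tracefs:

	if (trace_amd_pstate_epp_perf_enabled())
		trace_amd_pstate_epp_perf(cpu, highest_perf, epp,
					  min_perf, max_perf, boost, changed);

	/* runtime enable (path assumes TRACE_SYSTEM amd_cpufreq):
	 *   /sys/kernel/tracing/events/amd_cpufreq/amd_pstate_epp_perf/enable
	 */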
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index 7f3fe2048981..447b9aa5ce40 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -22,69 +22,48 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
-#include <linux/amd-pstate.h>
+#include <linux/cleanup.h>
#include <acpi/cppc_acpi.h>
-/*
- * Abbreviations:
- * amd_pstate_ut: used as a shortform for AMD P-State unit test.
- * It helps to keep variable names smaller, simpler
- */
-enum amd_pstate_ut_result {
- AMD_PSTATE_UT_RESULT_PASS,
- AMD_PSTATE_UT_RESULT_FAIL,
-};
+#include <asm/msr.h>
+
+#include "amd-pstate.h"
+
struct amd_pstate_ut_struct {
const char *name;
- void (*func)(u32 index);
- enum amd_pstate_ut_result result;
+ int (*func)(u32 index);
};
/*
* Kernel module for testing the AMD P-State driver
*/
-static void amd_pstate_ut_acpi_cpc_valid(u32 index);
-static void amd_pstate_ut_check_enabled(u32 index);
-static void amd_pstate_ut_check_perf(u32 index);
-static void amd_pstate_ut_check_freq(u32 index);
+static int amd_pstate_ut_acpi_cpc_valid(u32 index);
+static int amd_pstate_ut_check_enabled(u32 index);
+static int amd_pstate_ut_check_perf(u32 index);
+static int amd_pstate_ut_check_freq(u32 index);
+static int amd_pstate_ut_check_driver(u32 index);
static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
{"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled },
{"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf },
- {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq }
+ {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq },
+ {"amd_pstate_ut_check_driver", amd_pstate_ut_check_driver }
};
static bool get_shared_mem(void)
{
bool result = false;
- char path[] = "/sys/module/amd_pstate/parameters/shared_mem";
- char buf[5] = {0};
- struct file *filp = NULL;
- loff_t pos = 0;
- ssize_t ret;
-
- if (!boot_cpu_has(X86_FEATURE_CPPC)) {
- filp = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(filp))
- pr_err("%s unable to open %s file!\n", __func__, path);
- else {
- ret = kernel_read(filp, &buf, sizeof(buf), &pos);
- if (ret < 0)
- pr_err("%s read %s file fail ret=%ld!\n",
- __func__, path, (long)ret);
- filp_close(filp, NULL);
- }
- if ('Y' == *buf)
- result = true;
- }
+ if (!boot_cpu_has(X86_FEATURE_CPPC))
+ result = true;
return result;
}
@@ -92,117 +71,114 @@ static bool get_shared_mem(void)
/*
* check the _CPC object is present in SBIOS.
*/
-static void amd_pstate_ut_acpi_cpc_valid(u32 index)
+static int amd_pstate_ut_acpi_cpc_valid(u32 index)
{
- if (acpi_cpc_valid())
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if (!acpi_cpc_valid()) {
pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
+ return -EINVAL;
}
+
+ return 0;
}
-static void amd_pstate_ut_pstate_enable(u32 index)
+/*
+ * check if amd pstate is enabled
+ */
+static int amd_pstate_ut_check_enabled(u32 index)
{
- int ret = 0;
u64 cppc_enable = 0;
+ int ret;
- ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+ if (get_shared_mem())
+ return 0;
+
+ ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
- pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
- return;
+ pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+ return ret;
}
- if (cppc_enable)
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+
+ if (!cppc_enable) {
pr_err("%s amd pstate must be enabled!\n", __func__);
+ return -EINVAL;
}
-}
-/*
- * check if amd pstate is enabled
- */
-static void amd_pstate_ut_check_enabled(u32 index)
-{
- if (get_shared_mem())
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else
- amd_pstate_ut_pstate_enable(index);
+ return 0;
}
/*
* check if performance values are reasonable.
* highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
*/
-static void amd_pstate_ut_check_perf(u32 index)
+static int amd_pstate_ut_check_perf(u32 index)
{
int cpu = 0, ret = 0;
u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
u64 cap1 = 0;
struct cppc_perf_caps cppc_perf;
- struct cpufreq_policy *policy = NULL;
- struct amd_cpudata *cpudata = NULL;
+ union perf_cached cur_perf;
- highest_perf = amd_get_highest_perf();
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+ struct amd_cpudata *cpudata;
- for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
if (!policy)
- break;
+ continue;
cpudata = policy->driver_data;
if (get_shared_mem()) {
ret = cppc_get_perf_caps(cpu, &cppc_perf);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
- return;
+ return ret;
}
+ highest_perf = cppc_perf.highest_perf;
nominal_perf = cppc_perf.nominal_perf;
lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
lowest_perf = cppc_perf.lowest_perf;
} else {
- ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
- return;
+ return ret;
}
- nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
- lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
- lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
+ highest_perf = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1);
+ nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
}
- if ((highest_perf != READ_ONCE(cpudata->highest_perf)) ||
- (nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
- (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
- (lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
- pr_err("%s cpu%d highest=%d %d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
- __func__, cpu, highest_perf, cpudata->highest_perf,
- nominal_perf, cpudata->nominal_perf,
- lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
- lowest_perf, cpudata->lowest_perf);
- return;
+ cur_perf = READ_ONCE(cpudata->perf);
+ if (highest_perf != cur_perf.highest_perf && !cpudata->hw_prefcore) {
+ pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
+ __func__, cpu, highest_perf, cur_perf.highest_perf);
+ return -EINVAL;
+ }
+ if (nominal_perf != cur_perf.nominal_perf ||
+ (lowest_nonlinear_perf != cur_perf.lowest_nonlinear_perf) ||
+ (lowest_perf != cur_perf.lowest_perf)) {
+ pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
+ __func__, cpu, nominal_perf, cur_perf.nominal_perf,
+ lowest_nonlinear_perf, cur_perf.lowest_nonlinear_perf,
+ lowest_perf, cur_perf.lowest_perf);
+ return -EINVAL;
}
if (!((highest_perf >= nominal_perf) &&
(nominal_perf > lowest_nonlinear_perf) &&
- (lowest_nonlinear_perf > lowest_perf) &&
+ (lowest_nonlinear_perf >= lowest_perf) &&
(lowest_perf > 0))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
__func__, cpu, highest_perf, nominal_perf,
lowest_nonlinear_perf, lowest_perf);
- return;
+ return -EINVAL;
}
}
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ return 0;
}
/*
@@ -210,55 +186,88 @@ static void amd_pstate_ut_check_perf(u32 index)
* max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
* check max freq when set support boost mode.
*/
-static void amd_pstate_ut_check_freq(u32 index)
+static int amd_pstate_ut_check_freq(u32 index)
{
int cpu = 0;
- struct cpufreq_policy *policy = NULL;
- struct amd_cpudata *cpudata = NULL;
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+ struct amd_cpudata *cpudata;
+
policy = cpufreq_cpu_get(cpu);
if (!policy)
- break;
+ continue;
cpudata = policy->driver_data;
- if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ if (!((policy->cpuinfo.max_freq >= cpudata->nominal_freq) &&
(cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
- (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
- (cpudata->min_freq > 0))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ (cpudata->lowest_nonlinear_freq >= policy->cpuinfo.min_freq) &&
+ (policy->cpuinfo.min_freq > 0))) {
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
- cpudata->lowest_nonlinear_freq, cpudata->min_freq);
- return;
+ __func__, cpu, policy->cpuinfo.max_freq, cpudata->nominal_freq,
+ cpudata->lowest_nonlinear_freq, policy->cpuinfo.min_freq);
+ return -EINVAL;
}
- if (cpudata->min_freq != policy->min) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
- pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
- __func__, cpu, cpudata->min_freq, policy->min);
- return;
+ if (cpudata->lowest_nonlinear_freq != policy->min) {
+ pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
+ __func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
+ return -EINVAL;
}
if (cpudata->boost_supported) {
- if ((policy->max == cpudata->max_freq) ||
- (policy->max == cpudata->nominal_freq))
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if ((policy->max != policy->cpuinfo.max_freq) &&
+ (policy->max != cpudata->nominal_freq)) {
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
- __func__, cpu, policy->max, cpudata->max_freq,
+ __func__, cpu, policy->max, policy->cpuinfo.max_freq,
cpudata->nominal_freq);
- return;
+ return -EINVAL;
}
} else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d must support boost!\n", __func__, cpu);
- return;
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int amd_pstate_set_mode(enum amd_pstate_mode mode)
+{
+ const char *mode_str = amd_pstate_get_mode_string(mode);
+
+ pr_debug("->setting mode to %s\n", mode_str);
+
+ return amd_pstate_update_status(mode_str, strlen(mode_str));
+}
+
+static int amd_pstate_ut_check_driver(u32 index)
+{
+ enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
+ enum amd_pstate_mode orig_mode = amd_pstate_get_status();
+ int ret;
+
+ for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
+ ret = amd_pstate_set_mode(mode1);
+ if (ret)
+ return ret;
+ for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
+ if (mode1 == mode2)
+ continue;
+ ret = amd_pstate_set_mode(mode2);
+ if (ret)
+ goto out;
}
}
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+out:
+ if (ret)
+ pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
+ amd_pstate_get_mode_string(mode1),
+ amd_pstate_get_mode_string(mode2), ret);
+
+ amd_pstate_set_mode(orig_mode);
+ return ret;
}
static int __init amd_pstate_ut_init(void)
@@ -266,16 +275,12 @@ static int __init amd_pstate_ut_init(void)
u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);
for (i = 0; i < arr_size; i++) {
- amd_pstate_ut_cases[i].func(i);
- switch (amd_pstate_ut_cases[i].result) {
- case AMD_PSTATE_UT_RESULT_PASS:
+ int ret = amd_pstate_ut_cases[i].func(i);
+
+ if (ret)
+ pr_err("%-4d %-20s\t fail: %d!\n", i+1, amd_pstate_ut_cases[i].name, ret);
+ else
pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
- break;
- case AMD_PSTATE_UT_RESULT_FAIL:
- default:
- pr_info("%-4d %-20s\t fail!\n", i+1, amd_pstate_ut_cases[i].name);
- break;
- }
}
return 0;
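
The reworked loops above use the scope-based cleanup support from <linux/cleanup.h>: annotating the policy pointer with __free(put_cpufreq_policy) drops the reference automatically on every exit path, which is what allows the early returns without manual unwinding. A condensed sketch of the pattern, assuming the put_cpufreq_policy free hook that the cpufreq core declares with DEFINE_FREE():

#include <linux/cleanup.h>
#include <linux/cpufreq.h>

static int check_one_cpu(int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;	/* nothing acquired, nothing to drop */

	/* ... use policy; cpufreq_cpu_put() runs automatically on return ... */
	return 0;
}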
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 81fba0dcbee9..c45bc98721d2 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -36,7 +37,7 @@
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
-#include <linux/amd-pstate.h>
+#include <linux/topology.h>
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
@@ -45,25 +46,47 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
+
+#include "amd-pstate.h"
#include "amd-pstate-trace.h"
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
+#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600
+
+#define AMD_CPPC_EPP_PERFORMANCE 0x00
+#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
+#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
+#define AMD_CPPC_EPP_POWERSAVE 0xFF
+
+static const char * const amd_pstate_mode_string[] = {
+ [AMD_PSTATE_UNDEFINED] = "undefined",
+ [AMD_PSTATE_DISABLE] = "disable",
+ [AMD_PSTATE_PASSIVE] = "passive",
+ [AMD_PSTATE_ACTIVE] = "active",
+ [AMD_PSTATE_GUIDED] = "guided",
+};
+static_assert(ARRAY_SIZE(amd_pstate_mode_string) == AMD_PSTATE_MAX);
+
+const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
+{
+ if (mode < AMD_PSTATE_UNDEFINED || mode >= AMD_PSTATE_MAX)
+ mode = AMD_PSTATE_UNDEFINED;
+ return amd_pstate_mode_string[mode];
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);
+
+struct quirk_entry {
+ u32 nominal_freq;
+ u32 lowest_freq;
+};
-/*
- * TODO: We need more time to fine tune processors with shared memory solution
- * with community together.
- *
- * There are some performance drops on the CPU benchmarks which reports from
- * Suse. We are co-working with them to fine tune the shared memory solution. So
- * we disable it by default to go acpi-cpufreq on these processors and add a
- * module parameter to be able to enable it manually for debugging.
- */
static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
-static bool cppc_enabled;
+static bool amd_pstate_prefcore = true;
+static struct quirk_entry *quirks;
/*
* AMD Energy Preference Performance (EPP)
@@ -87,6 +110,7 @@ enum energy_perf_value_index {
EPP_INDEX_BALANCE_PERFORMANCE,
EPP_INDEX_BALANCE_POWERSAVE,
EPP_INDEX_POWERSAVE,
+ EPP_INDEX_MAX,
};
static const char * const energy_perf_strings[] = {
@@ -95,8 +119,8 @@ static const char * const energy_perf_strings[] = {
[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
[EPP_INDEX_POWERSAVE] = "power",
- NULL
};
+static_assert(ARRAY_SIZE(energy_perf_strings) == EPP_INDEX_MAX);
static unsigned int epp_values[] = {
[EPP_INDEX_DEFAULT] = 0,
@@ -104,241 +128,337 @@ static unsigned int epp_values[] = {
[EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
[EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
- };
+};
+static_assert(ARRAY_SIZE(epp_values) == EPP_INDEX_MAX);
typedef int (*cppc_mode_transition_fn)(int);
+static struct quirk_entry quirk_amd_7k62 = {
+ .nominal_freq = 2600,
+ .lowest_freq = 550,
+};
+
+static inline u8 freq_to_perf(union perf_cached perf, u32 nominal_freq, unsigned int freq_val)
+{
+ u32 perf_val = DIV_ROUND_UP_ULL((u64)freq_val * perf.nominal_perf, nominal_freq);
+
+ return (u8)clamp(perf_val, perf.lowest_perf, perf.highest_perf);
+}
+
+static inline u32 perf_to_freq(union perf_cached perf, u32 nominal_freq, u8 perf_val)
+{
+ return DIV_ROUND_UP_ULL((u64)nominal_freq * perf_val,
+ perf.nominal_perf);
+}
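
A quick worked example of the two conversion helpers above, using hypothetical cached values (illustration only):

/*
 * Hypothetical: nominal_freq = 2600, nominal_perf = 166,
 * lowest_perf = 30, highest_perf = 228.
 *
 *	freq_to_perf(perf, 2600, 3600) = DIV_ROUND_UP(3600 * 166, 2600)
 *				       = 230, clamped to highest_perf = 228
 *	perf_to_freq(perf, 2600, 166)  = DIV_ROUND_UP(2600 * 166, 166)
 *				       = 2600 (nominal maps back to nominal)
 */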
+
+static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
+{
+ /*
+ * Match the broken BIOS on family 17h processors supporting CPPC V2:
+ * it lacks the nominal_freq and lowest_freq capability definitions
+ * in the ACPI tables.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+ quirks = dmi->driver_data;
+ pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
+ {
+ .callback = dmi_matched_7k62_bios_bug,
+ .ident = "AMD EPYC 7K62",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
+ DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
+ },
+ .driver_data = &quirk_amd_7k62,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);
+
static inline int get_mode_idx_from_str(const char *str, size_t size)
{
int i;
- for (i=0; i < AMD_PSTATE_MAX; i++) {
+ for (i = 0; i < AMD_PSTATE_MAX; i++) {
if (!strncmp(str, amd_pstate_mode_string[i], size))
return i;
}
return -EINVAL;
}
-static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+static u8 msr_get_epp(struct amd_cpudata *cpudata)
{
- u64 epp;
+ u64 value;
int ret;
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
- if (!cppc_req_cached) {
- epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- &cppc_req_cached);
- if (epp)
- return epp;
- }
- epp = (cppc_req_cached >> 24) & 0xFF;
- } else {
- ret = cppc_get_epp_perf(cpudata->cpu, &epp);
- if (ret < 0) {
- pr_debug("Could not retrieve energy perf value (%d)\n", ret);
- return -EIO;
- }
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
}
- return (s16)(epp & 0xff);
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value);
}
-static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
+DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
+
+static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
{
- s16 epp;
- int index = -EINVAL;
+ return static_call(amd_pstate_get_epp)(cpudata);
+}
- epp = amd_pstate_get_epp(cpudata, 0);
- if (epp < 0)
- return epp;
+static u8 shmem_get_epp(struct amd_cpudata *cpudata)
+{
+ u64 epp;
+ int ret;
- switch (epp) {
- case AMD_CPPC_EPP_PERFORMANCE:
- index = EPP_INDEX_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
- index = EPP_INDEX_BALANCE_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_POWERSAVE:
- index = EPP_INDEX_BALANCE_POWERSAVE;
- break;
- case AMD_CPPC_EPP_POWERSAVE:
- index = EPP_INDEX_POWERSAVE;
- break;
- default:
- break;
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
}
- return index;
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
}
-static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
- int ret;
- struct cppc_perf_ctrls perf_ctrls;
-
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
- u64 value = READ_ONCE(cpudata->cppc_req_cached);
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u64 value, prev;
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+ if (value == prev)
+ return 0;
- ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- if (!ret)
- cpudata->epp_cached = epp;
+ if (fast_switch) {
+ wrmsrq(MSR_AMD_CPPC_REQ, value);
+ return 0;
} else {
- perf_ctrls.energy_perf = epp;
- ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- if (ret) {
- pr_debug("failed to set energy perf value (%d)\n", ret);
+ int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+
+ if (ret)
return ret;
- }
- cpudata->epp_cached = epp;
}
- return ret;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ return 0;
}
-static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
- int pref_index)
+DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
+
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
+ u8 min_perf, u8 des_perf,
+ u8 max_perf, u8 epp,
+ bool fast_switch)
{
- int epp = -EINVAL;
+ return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
+ max_perf, epp, fast_switch);
+}
+
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u64 value, prev;
int ret;
- if (!pref_index) {
- pr_debug("EPP pref_index is invalid\n");
- return -EINVAL;
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ value != prev);
}
- if (epp == -EINVAL)
- epp = epp_values[pref_index];
+ if (value == prev)
+ return 0;
- if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
- pr_debug("EPP cannot be set under performance policy\n");
- return -EBUSY;
+ ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (ret) {
+ pr_err("failed to set energy perf value (%d)\n", ret);
+ return ret;
}
- ret = amd_pstate_set_epp(cpudata, epp);
+	/* keep the cached request in sync so that msr_update_perf() can skip redundant writes */
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
return ret;
}
-static inline int pstate_enable(bool enable)
-{
- int ret, cpu;
- unsigned long logical_proc_id_mask = 0;
+DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
- if (enable == cppc_enabled)
- return 0;
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
+{
+ return static_call(amd_pstate_set_epp)(policy, epp);
+}
- for_each_present_cpu(cpu) {
- unsigned long logical_id = topology_logical_die_id(cpu);
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ struct cppc_perf_ctrls perf_ctrls;
+ u8 epp_cached;
+ u64 value;
+ int ret;
- if (test_bit(logical_id, &logical_proc_id_mask))
- continue;
- set_bit(logical_id, &logical_proc_id_mask);
+ epp_cached = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
- ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
- enable);
- if (ret)
- return ret;
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ epp != epp_cached);
}
- cppc_enabled = enable;
- return 0;
-}
-
-static int cppc_enable(bool enable)
-{
- int cpu, ret = 0;
- struct cppc_perf_ctrls perf_ctrls;
-
- if (enable == cppc_enabled)
+ if (epp == epp_cached)
return 0;
- for_each_present_cpu(cpu) {
- ret = cppc_set_enable(cpu, enable);
- if (ret)
- return ret;
-
- /* Enable autonomous mode for EPP */
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- /* Set desired perf as zero to allow EPP firmware control */
- perf_ctrls.desired_perf = 0;
- ret = cppc_set_perf(cpu, &perf_ctrls);
- if (ret)
- return ret;
- }
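+	/* Shared-memory (non-MSR) systems program EPP through the ACPI CPPC perf controls */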
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
}
- cppc_enabled = enable;
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
+static inline int msr_cppc_enable(struct cpufreq_policy *policy)
+{
+ return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
+}
-static inline int amd_pstate_enable(bool enable)
+static int shmem_cppc_enable(struct cpufreq_policy *policy)
{
- return static_call(amd_pstate_enable)(enable);
+ return cppc_set_enable(policy->cpu, 1);
}
-static int pstate_init_perf(struct amd_cpudata *cpudata)
+DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);
+
+static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
{
- u64 cap1;
- u32 highest_perf;
+ return static_call(amd_pstate_cppc_enable)(policy);
+}
+
+static int msr_init_perf(struct amd_cpudata *cpudata)
+{
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ u64 cap1, numerator, cppc_req;
+ u8 min_perf;
- int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
+ int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, cppc_req);
+ min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);
+
/*
- * TODO: Introduce AMD specific power feature.
- *
- * CPPC entry doesn't indicate the highest performance in some ASICs.
+	 * Clear the min_perf field and check whether the rest of the MSR is zero; if so, the
+	 * min_perf value is the one specified through the BIOS option.
*/
- highest_perf = amd_get_highest_perf();
- if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
- highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+ cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
+ if (!cppc_req)
+ perf.bios_min_perf = min_perf;
- WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ perf.nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ perf.lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ perf.lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ WRITE_ONCE(cpudata->perf, perf);
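+	/* The CAP1 highest-perf field encodes the preferred core ranking; cache it separately */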
+ WRITE_ONCE(cpudata->prefcore_ranking, FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1));
return 0;
}
-static int cppc_init_perf(struct amd_cpudata *cpudata)
+static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
- u32 highest_perf;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ u64 numerator;
+ bool auto_sel;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
- highest_perf = amd_get_highest_perf();
- if (highest_perf > cppc_perf.highest_perf)
- highest_perf = cppc_perf.highest_perf;
-
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
- WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
- WRITE_ONCE(cpudata->lowest_nonlinear_perf,
- cppc_perf.lowest_nonlinear_perf);
- WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = cppc_perf.lowest_perf;
+ perf.nominal_perf = cppc_perf.nominal_perf;
+ perf.lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ perf.lowest_perf = cppc_perf.lowest_perf;
+ WRITE_ONCE(cpudata->perf, perf);
+ WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
- ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
+ ret = cppc_get_auto_sel(cpudata->cpu, &auto_sel);
if (ret) {
pr_warn("failed to get auto_sel, ret: %d\n", ret);
return 0;
@@ -353,44 +473,63 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
+DEFINE_STATIC_CALL(amd_pstate_init_perf, msr_init_perf);
static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
-{
- if (fast_switch)
- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
- else
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
-}
-
-static void cppc_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ u64 value, prev;
+ int ret;
+
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ int ret = shmem_set_epp(policy, epp);
+
+ if (ret)
+ return ret;
+ }
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
+ if (value == prev)
+ return 0;
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
-}
+ ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ if (ret)
+ return ret;
-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
-{
- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ return 0;
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
@@ -399,8 +538,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
unsigned long flags;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
@@ -426,69 +565,106 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
return true;
}
-static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
+static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
+ u8 des_perf, u8 max_perf, bool fast_switch, int gov_flags)
{
- u64 prev = READ_ONCE(cpudata->cppc_req_cached);
- u64 value = prev;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ if (!policy)
+ return;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+ /* limit the max perf when core performance boost feature is disabled */
+ if (!cpudata->boost_supported)
+ max_perf = min_t(u8, perf.nominal_perf, max_perf);
+
+ des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
+
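+	/* Keep policy->cur in sync with the frequency the clamped request maps to */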
+ policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
min_perf = des_perf;
des_perf = 0;
}
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(des_perf);
-
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
-
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
- cpudata->cpu, (value != prev), fast_switch);
+ cpudata->cpu, fast_switch);
}
- if (value == prev)
- return;
+ amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
+}
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
+{
+ /*
+	 * Initialize the lower frequency limit (i.e. policy->min) with the
+	 * lowest_nonlinear_freq or, if specified, the BIOS min frequency,
+	 * overriding the initial value set by the cpufreq core and the
+	 * amd-pstate QoS requests.
+ */
+ if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) =
+ cpufreq_cpu_get(policy_data->cpu);
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+
+ if (!policy)
+ return -EINVAL;
+
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
+
+ if (perf.bios_min_perf)
+ policy_data->min = perf_to_freq(perf, cpudata->nominal_freq,
+ perf.bios_min_perf);
+ else
+ policy_data->min = cpudata->lowest_nonlinear_freq;
+ }
- amd_pstate_update_perf(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ cpufreq_verify_within_cpu_limits(policy_data);
+
+ return 0;
}
-static int amd_pstate_verify(struct cpufreq_policy_data *policy)
+static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- cpufreq_verify_within_cpu_limits(policy);
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
- return 0;
+ perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
+ WRITE_ONCE(cpudata->max_limit_freq, policy->max);
+
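+	/* The performance policy keeps the perf floor at nominal, capped by the max limit */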
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq));
+ } else {
+ perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
+ WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+ }
+
+ WRITE_ONCE(cpudata->perf, perf);
}
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
unsigned int target_freq, bool fast_switch)
{
struct cpufreq_freqs freqs;
- struct amd_cpudata *cpudata = policy->driver_data;
- unsigned long max_perf, min_perf, des_perf, cap_perf;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ u8 des_perf;
- if (!cpudata->max_freq)
- return -ENODEV;
+ cpudata = policy->driver_data;
- cap_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
- max_perf = cap_perf;
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
+
+ perf = READ_ONCE(cpudata->perf);
freqs.old = policy->cur;
freqs.new = target_freq;
- des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
- cpudata->max_freq);
+ des_perf = freq_to_perf(perf, cpudata->nominal_freq, target_freq);
WARN_ON(fast_switch && !policy->fast_switch_enabled);
/*
@@ -499,8 +675,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
if (!fast_switch)
cpufreq_freq_transition_begin(policy, &freqs);
- amd_pstate_update(cpudata, min_perf, des_perf,
- max_perf, fast_switch, policy->governor->flags);
+ amd_pstate_update(cpudata, perf.min_limit_perf, des_perf,
+ perf.max_limit_perf, fast_switch,
+ policy->governor->flags);
if (!fast_switch)
cpufreq_freq_transition_end(policy, &freqs, false);
@@ -518,7 +695,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return amd_pstate_update_freq(policy, target_freq, true);
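+	/* .fast_switch must report the frequency actually set; on failure report the unchanged policy->cur */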
+ if (!amd_pstate_update_freq(policy, target_freq, true))
+ return target_freq;
+ return policy->cur;
}
static void amd_pstate_adjust_perf(unsigned int cpu,
@@ -526,161 +705,271 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long target_perf,
unsigned long capacity)
{
- unsigned long max_perf, min_perf, des_perf,
- cap_perf, lowest_nonlinear_perf, max_freq;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct amd_cpudata *cpudata = policy->driver_data;
- unsigned int target_freq;
+ u8 max_perf, min_perf, des_perf, cap_perf;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+
+ if (!policy)
+ return;
+
+ cpudata = policy->driver_data;
- cap_perf = READ_ONCE(cpudata->highest_perf);
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- max_freq = READ_ONCE(cpudata->max_freq);
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
+
+ perf = READ_ONCE(cpudata->perf);
+ cap_perf = perf.highest_perf;
des_perf = cap_perf;
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
- min_perf = READ_ONCE(cpudata->highest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+ else
+ min_perf = cap_perf;
- if (min_perf < lowest_nonlinear_perf)
- min_perf = lowest_nonlinear_perf;
+ if (min_perf < perf.min_limit_perf)
+ min_perf = perf.min_limit_perf;
- max_perf = cap_perf;
+ max_perf = perf.max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
- target_freq = div_u64(des_perf * max_freq, max_perf);
- policy->cur = target_freq;
-
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
policy->governor->flags);
- cpufreq_cpu_put(policy);
}
-static int amd_get_min_freq(struct amd_cpudata *cpudata)
+static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
- struct cppc_perf_caps cppc_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ u32 nominal_freq, max_freq;
+ int ret = 0;
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
+ nominal_freq = READ_ONCE(cpudata->nominal_freq);
+ max_freq = perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf);
- /* Switch to khz */
- return cppc_perf.lowest_freq * 1000;
-}
+ if (on)
+ policy->cpuinfo.max_freq = max_freq;
+ else if (policy->cpuinfo.max_freq > nominal_freq)
+ policy->cpuinfo.max_freq = nominal_freq;
-static int amd_get_max_freq(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_caps cppc_perf;
- u32 max_perf, max_freq, nominal_freq, nominal_perf;
- u64 boost_ratio;
+ policy->max = policy->cpuinfo.max_freq;
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
+ if (cppc_state == AMD_PSTATE_PASSIVE) {
+ ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq);
+ if (ret < 0)
+ pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu);
+ }
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
- max_perf = READ_ONCE(cpudata->highest_perf);
+ return ret < 0 ? ret : 0;
+}
- boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
+static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
+
+ if (!cpudata->boost_supported) {
+ pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ return -EOPNOTSUPP;
+ }
- max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
+ ret = amd_pstate_cpu_boost_update(policy, state);
+ refresh_frequency_limits(policy);
- /* Switch to khz */
- return max_freq * 1000;
+ return ret;
}
-static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
+static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
{
- struct cppc_perf_caps cppc_perf;
+ u64 boost_val;
+ int ret = -1;
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
+ /*
+	 * If the platform has no CPB support or it is disabled, initialize the current driver's
+	 * boost_enabled state to false; this is not an error, and the cpufreq core handles it.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_CPB)) {
+ pr_debug_once("Boost CPB capabilities not present in the processor\n");
+ ret = 0;
+ goto exit_err;
+ }
+
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+ if (ret) {
+ pr_err_once("failed to read initial CPU boost state!\n");
+ ret = -EIO;
+ goto exit_err;
+ }
+
+ if (!(boost_val & MSR_K7_HWCR_CPB_DIS))
+ cpudata->boost_supported = true;
+
+ return 0;
- /* Switch to khz */
- return cppc_perf.nominal_freq * 1000;
+exit_err:
+ cpudata->boost_supported = false;
+ return ret;
}
-static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
+static void amd_perf_ctl_reset(unsigned int cpu)
{
- struct cppc_perf_caps cppc_perf;
- u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
- nominal_freq, nominal_perf;
- u64 lowest_nonlinear_ratio;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
+ wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+}
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
+#define CPPC_MAX_PERF U8_MAX
- lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
+{
+ /* user disabled or not detected */
+ if (!amd_pstate_prefcore)
+ return;
- lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
+ /* should use amd-hfi instead */
+ if (cpu_feature_enabled(X86_FEATURE_AMD_WORKLOAD_CLASS) &&
+ IS_ENABLED(CONFIG_AMD_HFI)) {
+ amd_pstate_prefcore = false;
+ return;
+ }
- lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
+ cpudata->hw_prefcore = true;
- /* Switch to khz */
- return lowest_nonlinear_freq * 1000;
+ /* Priorities must be initialized before ITMT support can be toggled on. */
+ sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
}
-static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+static void amd_pstate_update_limits(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata = policy->driver_data;
- int ret;
+ struct amd_cpudata *cpudata;
+ u32 prev_high = 0, cur_high = 0;
+ bool highest_perf_changed = false;
+ unsigned int cpu = policy->cpu;
- if (!cpudata->boost_supported) {
- pr_err("Boost mode is not supported by this processor or SBIOS\n");
- return -EINVAL;
- }
+ if (!amd_pstate_prefcore)
+ return;
- if (state)
- policy->cpuinfo.max_freq = cpudata->max_freq;
- else
- policy->cpuinfo.max_freq = cpudata->nominal_freq;
+ if (amd_get_highest_perf(cpu, &cur_high))
+ return;
- policy->max = policy->cpuinfo.max_freq;
+ cpudata = policy->driver_data;
- ret = freq_qos_update_request(&cpudata->req[1],
- policy->cpuinfo.max_freq);
- if (ret < 0)
- return ret;
+ prev_high = READ_ONCE(cpudata->prefcore_ranking);
+ highest_perf_changed = (prev_high != cur_high);
+ if (highest_perf_changed) {
+ WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
- return 0;
+ if (cur_high < CPPC_MAX_PERF) {
+ sched_set_itmt_core_prio((int)cur_high, cpu);
+ sched_update_asym_prefer_cpu(cpu, prev_high, cur_high);
+ }
+ }
}
-static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
+/*
+ * Get the pstate transition delay time from the ACPI tables set by firmware,
+ * instead of using a hardcoded value.
+ */
+static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
{
- u32 highest_perf, nominal_perf;
+ int transition_delay_ns;
+
+ transition_delay_ns = cppc_get_transition_latency(cpu);
+ if (transition_delay_ns < 0) {
+ if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
+ return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
+ else
+ return AMD_PSTATE_TRANSITION_DELAY;
+ }
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ return transition_delay_ns / NSEC_PER_USEC;
+}
- if (highest_perf <= nominal_perf)
- return;
+/*
+ * Get the pstate transition latency value from the ACPI tables set by firmware,
+ * instead of using a hardcoded value.
+ */
+static u32 amd_pstate_get_transition_latency(unsigned int cpu)
+{
+ int transition_latency;
+
+ transition_latency = cppc_get_transition_latency(cpu);
+ if (transition_latency < 0)
+ return AMD_PSTATE_TRANSITION_LATENCY;
- cpudata->boost_supported = true;
- current_pstate_driver->boost_enabled = true;
+ return transition_latency;
}
-static void amd_perf_ctl_reset(unsigned int cpu)
+/*
+ * amd_pstate_init_freq: Initialize the nominal_freq and lowest_nonlinear_freq
+ * for the @cpudata object.
+ *
+ * Requires: all perf members of @cpudata to be initialized.
+ *
+ * Returns 0 on success, non-zero value on failure.
+ */
+static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
- wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+ u32 min_freq, max_freq, nominal_freq, lowest_nonlinear_freq;
+ struct cppc_perf_caps cppc_perf;
+ union perf_cached perf;
+ int ret;
+
+ ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+ perf = READ_ONCE(cpudata->perf);
+
+ if (quirks && quirks->nominal_freq)
+ nominal_freq = quirks->nominal_freq;
+ else
+ nominal_freq = cppc_perf.nominal_freq;
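+	/* _CPC reports frequencies in MHz; cpufreq expects kHz */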
+ nominal_freq *= 1000;
+
+ if (quirks && quirks->lowest_freq) {
+ min_freq = quirks->lowest_freq;
+ perf.lowest_perf = freq_to_perf(perf, nominal_freq, min_freq);
+ WRITE_ONCE(cpudata->perf, perf);
+ } else
+ min_freq = cppc_perf.lowest_freq;
+
+ min_freq *= 1000;
+
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
+
+ max_freq = perf_to_freq(perf, nominal_freq, perf.highest_perf);
+ lowest_nonlinear_freq = perf_to_freq(perf, nominal_freq, perf.lowest_nonlinear_perf);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
+
+	/*
+	 * The values below need to be initialized correctly, otherwise the driver will fail to load.
+	 * max_freq is calculated as (nominal_freq * highest_perf) / nominal_perf.
+	 * lowest_nonlinear_freq is a value in the range [min_freq, nominal_freq].
+	 * Check the _CPC objects in the ACPI tables if any of these values are incorrect.
+ */
+ if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
+ pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
+ min_freq, max_freq, nominal_freq);
+ return -EINVAL;
+ }
+
+ if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq) {
+ pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
+ lowest_nonlinear_freq, min_freq, nominal_freq);
+ return -EINVAL;
+ }
+
+ return 0;
}
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
- struct device *dev;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ struct device *dev;
+ int ret;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
@@ -701,35 +990,42 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+ amd_pstate_init_prefcore(cpudata);
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
- ret = -EINVAL;
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
goto free_cpudata1;
- }
- policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
- policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
+ ret = amd_pstate_init_boost_support(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
+ policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
- policy->min = min_freq;
- policy->max = max_freq;
+ perf = READ_ONCE(cpudata->perf);
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
+
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
+
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- if (boot_cpu_has(X86_FEATURE_CPPC))
+ if (cpu_feature_enabled(X86_FEATURE_CPPC))
policy->fast_switch_possible = true;
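+	/* Register with the QoS default so that amd_pstate_verify() can pick the effective floor */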
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
- FREQ_QOS_MIN, policy->cpuinfo.min_freq);
+ FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_cpudata1;
@@ -742,15 +1038,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
goto free_cpudata2;
}
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
-
policy->driver_data = cpudata;
- amd_pstate_boost_init(cpudata);
if (!current_pstate_driver->adjust_perf)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
@@ -759,42 +1048,23 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
free_cpudata2:
freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
-static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]);
policy->fast_switch_possible = false;
kfree(cpudata);
-
- return 0;
-}
-
-static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_enable(true);
- if (ret)
- pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
-
- return ret;
-}
-
-static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_enable(false);
- if (ret)
- pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
-
- return ret;
}
/* Sysfs attributes */
@@ -807,27 +1077,27 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
char *buf)
{
- int max_freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- max_freq = amd_get_max_freq(cpudata);
- if (max_freq < 0)
- return max_freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sysfs_emit(buf, "%u\n", max_freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf));
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
char *buf)
{
- int freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- freq = amd_get_lowest_nonlinear_freq(cpudata);
- if (freq < 0)
- return freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sysfs_emit(buf, "%u\n", freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.lowest_nonlinear_perf));
}
/*
@@ -837,24 +1107,49 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
char *buf)
{
- u32 perf;
+ struct amd_cpudata *cpudata;
+
+ cpudata = policy->driver_data;
+
+ return sysfs_emit(buf, "%u\n", cpudata->perf.highest_perf);
+}
+
+static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
+ char *buf)
+{
+ u8 perf;
struct amd_cpudata *cpudata = policy->driver_data;
- perf = READ_ONCE(cpudata->highest_perf);
+ perf = READ_ONCE(cpudata->prefcore_ranking);
return sysfs_emit(buf, "%u\n", perf);
}
+static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
+ char *buf)
+{
+ bool hw_prefcore;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
+
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
+}
+
static ssize_t show_energy_performance_available_preferences(
struct cpufreq_policy *policy, char *buf)
{
- int i = 0;
- int offset = 0;
+ int offset = 0, i;
+ struct amd_cpudata *cpudata = policy->driver_data;
- while (energy_perf_strings[i] != NULL)
- offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
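+	/* Under the performance policy, "performance" is the only selectable preference */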
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return sysfs_emit_at(buf, offset, "%s\n",
+ energy_perf_strings[EPP_INDEX_PERFORMANCE]);
+
+ for (i = 0; i < ARRAY_SIZE(energy_perf_strings); i++)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i]);
- sysfs_emit_at(buf, offset, "\n");
+ offset += sysfs_emit_at(buf, offset, "\n");
return offset;
}
@@ -863,61 +1158,107 @@ static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
struct amd_cpudata *cpudata = policy->driver_data;
- char str_preference[21];
ssize_t ret;
+ u8 epp;
- ret = sscanf(buf, "%20s", str_preference);
- if (ret != 1)
- return -EINVAL;
-
- ret = match_string(energy_perf_strings, -1, str_preference);
+ ret = sysfs_match_string(energy_perf_strings, buf);
if (ret < 0)
return -EINVAL;
- mutex_lock(&amd_pstate_limits_lock);
- ret = amd_pstate_set_energy_pref_index(cpudata, ret);
- mutex_unlock(&amd_pstate_limits_lock);
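+	/* Index 0 ("default") maps back to the firmware-provided EPP value */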
+ if (!ret)
+ epp = cpudata->epp_default;
+ else
+ epp = epp_values[ret];
+
+ if (epp > 0 && policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("EPP cannot be set under performance policy\n");
+ return -EBUSY;
+ }
+
+ ret = amd_pstate_set_epp(policy, epp);
- return ret ?: count;
+ return ret ? ret : count;
}
static ssize_t show_energy_performance_preference(
struct cpufreq_policy *policy, char *buf)
{
struct amd_cpudata *cpudata = policy->driver_data;
- int preference;
+ u8 preference, epp;
- preference = amd_pstate_get_energy_pref_index(cpudata);
- if (preference < 0)
- return preference;
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
+
+ switch (epp) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ preference = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ preference = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ preference = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ preference = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ return -EINVAL;
+ }
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}
static void amd_pstate_driver_cleanup(void)
{
- amd_pstate_enable(false);
+ if (amd_pstate_prefcore)
+ sched_clear_itmt_support();
+
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
+static int amd_pstate_set_driver(int mode_idx)
+{
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int amd_pstate_register_driver(int mode)
{
int ret;
- if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
- else if (mode == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
- else
- return -EINVAL;
+ ret = amd_pstate_set_driver(mode);
+ if (ret)
+ return ret;
cppc_state = mode;
+
+ /* at least one CPU supports CPB */
+ current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
+
ret = cpufreq_register_driver(current_pstate_driver);
if (ret) {
amd_pstate_driver_cleanup();
return ret;
}
+
+ /* Enable ITMT support once all CPUs have initialized their asym priorities. */
+ if (amd_pstate_prefcore)
+ sched_set_itmt_support();
+
return 0;
}
@@ -934,10 +1275,10 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
cppc_state = mode;
- if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
+ if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
return 0;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
}
@@ -994,7 +1335,13 @@ static ssize_t amd_pstate_show_status(char *buf)
return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}
-static int amd_pstate_update_status(const char *buf, size_t size)
+int amd_pstate_get_status(void)
+{
+ return cppc_state;
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_status);
+
+int amd_pstate_update_status(const char *buf, size_t size)
{
int mode_idx;
@@ -1002,53 +1349,61 @@ static int amd_pstate_update_status(const char *buf, size_t size)
return -EINVAL;
mode_idx = get_mode_idx_from_str(buf, size);
+ if (mode_idx < 0)
+ return mode_idx;
- if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
- return -EINVAL;
-
- if (mode_state_machine[cppc_state][mode_idx])
+ if (mode_state_machine[cppc_state][mode_idx]) {
+ guard(mutex)(&amd_pstate_driver_lock);
return mode_state_machine[cppc_state][mode_idx](mode_idx);
+ }
return 0;
}
+EXPORT_SYMBOL_GPL(amd_pstate_update_status);
-static ssize_t show_status(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- ssize_t ret;
- mutex_lock(&amd_pstate_driver_lock);
- ret = amd_pstate_show_status(buf);
- mutex_unlock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
- return ret;
+ return amd_pstate_show_status(buf);
}
-static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+static ssize_t status_store(struct device *a, struct device_attribute *b,
const char *buf, size_t count)
{
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&amd_pstate_driver_lock);
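+	/* Locking is handled inside amd_pstate_update_status() */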
ret = amd_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&amd_pstate_driver_lock);
return ret < 0 ? ret : count;
}
+static ssize_t prefcore_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
+}
+
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
+cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
-define_one_global_rw(status);
+static DEVICE_ATTR_RW(status);
+static DEVICE_ATTR_RO(prefcore);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
+ &amd_pstate_prefcore_ranking,
+ &amd_pstate_hw_prefcore,
NULL,
};
@@ -1056,13 +1411,16 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
&amd_pstate_max_freq,
&amd_pstate_lowest_nonlinear_freq,
&amd_pstate_highest_perf,
+ &amd_pstate_prefcore_ranking,
+ &amd_pstate_hw_prefcore,
&energy_performance_preference,
&energy_performance_available_preferences,
NULL,
};
static struct attribute *pstate_global_attributes[] = {
- &status.attr,
+ &dev_attr_status.attr,
+ &dev_attr_prefcore.attr,
NULL
};
@@ -1093,10 +1451,10 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
struct device *dev;
- u64 value;
+ int ret;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
@@ -1112,256 +1470,186 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return -ENOMEM;
cpudata->cpu = policy->cpu;
- cpudata->epp_policy = 0;
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
- min_freq = amd_get_min_freq(cpudata);
- max_freq = amd_get_max_freq(cpudata);
- nominal_freq = amd_get_nominal_freq(cpudata);
- lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
- if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
- dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
- min_freq, max_freq);
- ret = -EINVAL;
+ amd_pstate_init_prefcore(cpudata);
+
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
goto free_cpudata1;
- }
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
- /* It will be updated by governor */
- policy->cur = policy->cpuinfo.min_freq;
+ ret = amd_pstate_init_boost_support(cpudata);
+ if (ret)
+ goto free_cpudata1;
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+ perf = READ_ONCE(cpudata->perf);
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
policy->driver_data = cpudata;
- cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
+ /* It will be updated by governor */
+ policy->cur = policy->cpuinfo.min_freq;
+
+
/*
* Set the policy to provide a valid fallback value in case
* the default cpufreq governor is neither powersave nor performance.
*/
if (amd_pstate_acpi_pm_profile_server() ||
- amd_pstate_acpi_pm_profile_undefined())
+ amd_pstate_acpi_pm_profile_undefined()) {
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- else
+ cpudata->epp_default = amd_pstate_get_epp(cpudata);
+ } else {
policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
+ }
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
- if (ret)
- return ret;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+ ret = amd_pstate_set_epp(policy, cpudata->epp_default);
+ if (ret)
+ return ret;
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
- if (ret)
- return ret;
- WRITE_ONCE(cpudata->cppc_cap1_cached, value);
- }
- amd_pstate_boost_init(cpudata);
+ current_pstate_driver->adjust_perf = NULL;
return 0;
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
-static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
-{
- pr_debug("CPU %d exiting\n", policy->cpu);
- return 0;
-}
-
-static void amd_pstate_epp_init(unsigned int cpu)
+static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
- u32 max_perf, min_perf;
- u64 value;
- s16 epp;
- max_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
+ if (cpudata) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
- if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- min_perf = max_perf;
-
- /* Initial min/max values for CPPC Performance Controls Register */
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
-
- /* CPPC EPP feature require to set zero to the desire perf bit */
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(0);
+ kfree(cpudata);
+ policy->driver_data = NULL;
+ }
- if (cpudata->epp_policy == cpudata->policy)
- goto skip_epp;
+ pr_debug("CPU %d exiting\n", policy->cpu);
+}
- cpudata->epp_policy = cpudata->policy;
+static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy, bool policy_change)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf;
+ u8 epp;
- /* Get BIOS pre-defined epp value */
- epp = amd_pstate_get_epp(cpudata, value);
- if (epp < 0) {
- /**
- * This return value can only be negative for shared_memory
- * systems where EPP register read/write not supported.
- */
- goto skip_epp;
- }
+ if (policy_change ||
+ policy->min != cpudata->min_limit_freq ||
+ policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
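+	/* The performance policy pins EPP to 0 (maximum performance) */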
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
+ else
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
- /* Set initial EPP value */
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- }
+ perf = READ_ONCE(cpudata->perf);
- WRITE_ONCE(cpudata->cppc_req_cached, value);
- amd_pstate_set_epp(cpudata, epp);
-skip_epp:
- cpufreq_cpu_put(policy);
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
+ perf.max_limit_perf, epp, false);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
- policy->cpuinfo.max_freq, policy->max);
-
cpudata->policy = policy->policy;
- amd_pstate_epp_init(policy->cpu);
-
- return 0;
-}
-
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
-{
- struct cppc_perf_ctrls perf_ctrls;
- u64 value, max_perf;
- int ret;
-
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_epp_update_limit(policy, true);
if (ret)
- pr_err("failed to enable amd pstate during resume, return %d\n", ret);
+ return ret;
- value = READ_ONCE(cpudata->cppc_req_cached);
- max_perf = READ_ONCE(cpudata->highest_perf);
+ /*
+ * policy->cur is never updated with the amd_pstate_epp driver, but it
+ * is used as a stale frequency value. So, keep it within limits.
+ */
+ policy->cur = policy->min;
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.max_perf = max_perf;
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- }
+ return 0;
}
-static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_online(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata = policy->driver_data;
-
- pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
-
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- amd_pstate_epp_reenable(cpudata);
- cpudata->suspended = false;
- }
-
- return 0;
+ return amd_pstate_cppc_enable(policy);
}
-static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- struct cppc_perf_ctrls perf_ctrls;
- int min_perf;
- u64 value;
-
- min_perf = READ_ONCE(cpudata->lowest_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
-
- mutex_lock(&amd_pstate_limits_lock);
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
- cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
- /* Set max perf same as min perf */
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(min_perf);
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.desired_perf = 0;
- perf_ctrls.max_perf = min_perf;
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- }
- mutex_unlock(&amd_pstate_limits_lock);
+ /*
+	 * Reset the CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+	 * min_perf value across kexec reboots. If this CPU is onlined normally afterwards, the
+	 * limits, EPP and desired perf are reset to the values cached in the cpudata struct.
+ */
+ return amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
}
-static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+static int amd_pstate_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int ret;
- pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
-
- if (cpudata->suspended)
- return 0;
-
- if (cppc_state == AMD_PSTATE_ACTIVE)
- amd_pstate_epp_offline(policy);
+ /*
+	 * Reset the CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+	 * min_perf value across kexec reboots. If this CPU is resumed back without kexec, the
+	 * limits, EPP and desired perf are reset to the values cached in the cpudata struct.
+ */
+ ret = amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
+ if (ret)
+ return ret;
- return 0;
-}
+	/* set this flag to avoid setting the core offline */
+ cpudata->suspended = true;
-static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
-{
- cpufreq_verify_within_cpu_limits(policy);
- pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
return 0;
}
-static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+static int amd_pstate_resume(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- int ret;
-
- /* avoid suspending when EPP is not enabled */
- if (cppc_state != AMD_PSTATE_ACTIVE)
- return 0;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur);
- /* set this flag to avoid setting core offline*/
- cpudata->suspended = true;
-
- /* disable CPPC in lowlevel firmware */
- ret = amd_pstate_enable(false);
- if (ret)
- pr_err("failed to suspend, return %d\n", ret);
-
- return 0;
+ /* Set CPPC_REQ to last sane value until the governor updates it */
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, cur_perf, perf.max_limit_perf,
+ 0U, false);
}
static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
@@ -1369,12 +1657,12 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
- mutex_lock(&amd_pstate_limits_lock);
+ int ret;
/* enable amd pstate from suspend state*/
- amd_pstate_epp_reenable(cpudata);
-
- mutex_unlock(&amd_pstate_limits_lock);
+ ret = amd_pstate_epp_update_limit(policy, false);
+ if (ret)
+ return ret;
cpudata->suspended = false;
}
@@ -1389,44 +1677,84 @@ static struct cpufreq_driver amd_pstate_driver = {
.fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
- .suspend = amd_pstate_cpu_suspend,
- .resume = amd_pstate_cpu_resume,
+ .online = amd_pstate_cpu_online,
+ .offline = amd_pstate_cpu_offline,
+ .suspend = amd_pstate_suspend,
+ .resume = amd_pstate_resume,
.set_boost = amd_pstate_set_boost,
+ .update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
.attr = amd_pstate_attr,
};
static struct cpufreq_driver amd_pstate_epp_driver = {
.flags = CPUFREQ_CONST_LOOPS,
- .verify = amd_pstate_epp_verify_policy,
+ .verify = amd_pstate_verify,
.setpolicy = amd_pstate_epp_set_policy,
.init = amd_pstate_epp_cpu_init,
.exit = amd_pstate_epp_cpu_exit,
- .offline = amd_pstate_epp_cpu_offline,
- .online = amd_pstate_epp_cpu_online,
- .suspend = amd_pstate_epp_suspend,
+ .offline = amd_pstate_cpu_offline,
+ .online = amd_pstate_cpu_online,
+ .suspend = amd_pstate_suspend,
.resume = amd_pstate_epp_resume,
+ .update_limits = amd_pstate_update_limits,
+ .set_boost = amd_pstate_set_boost,
.name = "amd-pstate-epp",
.attr = amd_pstate_epp_attr,
};
-static int __init amd_pstate_set_driver(int mode_idx)
+/*
+ * CPPC is not supported on family 17h CPUs with model IDs ranging from 0x10 to 0x2F.
+ * Show a debug message that helps to check whether the CPU has CPPC support, to aid
+ * diagnosing driver loading issues.
+ */
+static bool amd_cppc_supported(void)
{
- if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
- cppc_state = mode_idx;
- if (cppc_state == AMD_PSTATE_DISABLE)
- pr_info("driver is explicitly disabled\n");
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ bool warn = false;
- if (cppc_state == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
-
- if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
+ if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) {
+ pr_debug_once("CPPC feature is not supported by the processor\n");
+ return false;
+ }
- return 0;
+ /*
+ * If the CPPC feature is disabled in the BIOS for processors
+ * that support MSR-based CPPC, the AMD Pstate driver may not
+ * function correctly.
+ *
+ * For such processors, check the CPPC flag and display a
+ * warning message if the platform supports CPPC.
+ *
+	 * Note: The check below does not abort driver
+	 * registration, since it exists only for
+ * debugging purposes. Besides, it may still be possible for
+ * the driver to work using the shared-memory mechanism.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+ switch (c->x86_model) {
+ case 0x60 ... 0x6F:
+ case 0x80 ... 0xAF:
+ warn = true;
+ break;
+ }
+ } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
+ cpu_feature_enabled(X86_FEATURE_ZEN4)) {
+ switch (c->x86_model) {
+ case 0x10 ... 0x1F:
+ case 0x40 ... 0xAF:
+ warn = true;
+ break;
+ }
+ } else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
+ warn = true;
+ }
}
- return -EINVAL;
+ if (warn)
+ pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n"
+ "Please enable it if your BIOS has the CPPC option.\n");
+ return true;
}
static int __init amd_pstate_init(void)
@@ -1437,6 +1765,11 @@ static int __init amd_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
+ /* show debug message only if CPPC is not supported */
+ if (!amd_cppc_supported())
+ return -EOPNOTSUPP;
+
+	/* show a warning message when the BIOS is broken or ACPI is disabled */
if (!acpi_cpc_valid()) {
pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
return -ENODEV;
@@ -1446,55 +1779,59 @@ static int __init amd_pstate_init(void)
if (cpufreq_get_current_driver())
return -EEXIST;
- switch (cppc_state) {
- case AMD_PSTATE_UNDEFINED:
+ quirks = NULL;
+
+	/* check if this machine needs CPPC quirks */
+ dmi_check_system(amd_pstate_quirks_table);
+
+ /*
+	 * Determine the driver mode from the command line or kernel config.
+	 * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED.
+	 * Command line options override the kernel config settings.
+ */
+
+ if (cppc_state == AMD_PSTATE_UNDEFINED) {
/* Disable on the following configs by default:
* 1. Undefined platforms
- * 2. Server platforms
- * 3. Shared memory designs
+ * 2. Server platforms with CPUs older than Family 0x1A.
*/
if (amd_pstate_acpi_pm_profile_undefined() ||
- amd_pstate_acpi_pm_profile_server() ||
- !boot_cpu_has(X86_FEATURE_CPPC)) {
+ (amd_pstate_acpi_pm_profile_server() && boot_cpu_data.x86 < 0x1A)) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
- ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE);
- if (ret)
- return ret;
- break;
- case AMD_PSTATE_DISABLE:
+ /* get driver mode from kernel config option [1:4] */
+ cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
+ }
+
+ if (cppc_state == AMD_PSTATE_DISABLE) {
+ pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
- case AMD_PSTATE_PASSIVE:
- case AMD_PSTATE_ACTIVE:
- case AMD_PSTATE_GUIDED:
- break;
- default:
- return -EINVAL;
}
/* capability check */
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
- if (cppc_state != AMD_PSTATE_ACTIVE)
- current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
- static_call_update(amd_pstate_enable, cppc_enable);
- static_call_update(amd_pstate_init_perf, cppc_init_perf);
- static_call_update(amd_pstate_update_perf, cppc_update_perf);
+ static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
+ static_call_update(amd_pstate_init_perf, shmem_init_perf);
+ static_call_update(amd_pstate_update_perf, shmem_update_perf);
+ static_call_update(amd_pstate_get_epp, shmem_get_epp);
+ static_call_update(amd_pstate_set_epp, shmem_set_epp);
}
- /* enable amd pstate feature */
- ret = amd_pstate_enable(true);
- if (ret) {
- pr_err("failed to enable with return %d\n", ret);
- return ret;
+ if (amd_pstate_prefcore) {
+ ret = amd_detect_prefcore(&amd_pstate_prefcore);
+ if (ret)
+ return ret;
}
- ret = cpufreq_register_driver(current_pstate_driver);
- if (ret)
+ ret = amd_pstate_register_driver(cppc_state);
+ if (ret) {
pr_err("failed to register with return %d\n", ret);
+ return ret;
+ }
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
@@ -1527,7 +1864,17 @@ static int __init amd_pstate_param(char *str)
return amd_pstate_set_driver(mode_idx);
}
+
+static int __init amd_prefcore_param(char *str)
+{
+ if (!strcmp(str, "disable"))
+ amd_pstate_prefcore = false;
+
+ return 0;
+}
+
early_param("amd_pstate", amd_pstate_param);
+early_param("amd_prefcore", amd_prefcore_param);
MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
new file mode 100644
index 000000000000..cb45fdca27a6
--- /dev/null
+++ b/drivers/cpufreq/amd-pstate.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Author: Meng Li <li.meng@amd.com>
+ */
+
+#ifndef _LINUX_AMD_PSTATE_H
+#define _LINUX_AMD_PSTATE_H
+
+#include <linux/pm_qos.h>
+
+/*********************************************************************
+ * AMD P-state INTERFACE *
+ *********************************************************************/
+
+/**
+ * union perf_cached - A union to cache performance-related data.
+ * @highest_perf: the maximum performance an individual processor may reach,
+ * assuming ideal conditions
+ * For platforms that support the preferred core feature, the highest_perf value may be
+ * configured to any value in the range 166-255 by the firmware (because the preferred
+ * core ranking is encoded in the highest_perf value). To maintain consistency across
+ * all platforms, we split the highest_perf and preferred core ranking values into
+ * cpudata->perf.highest_perf and cpudata->prefcore_ranking.
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ * assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ * savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @min_limit_perf: Cached value of the performance corresponding to policy->min
+ * @max_limit_perf: Cached value of the performance corresponding to policy->max
+ * @bios_min_perf: Cached perf value corresponding to the "Requested CPU Min Frequency" BIOS option
+ */
+union perf_cached {
+ struct {
+ u8 highest_perf;
+ u8 nominal_perf;
+ u8 lowest_nonlinear_perf;
+ u8 lowest_perf;
+ u8 min_limit_perf;
+ u8 max_limit_perf;
+ u8 bios_min_perf;
+ };
+ u64 val;
+};
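/*
 * A minimal sketch (not part of the patch) of why the union carries a u64
 * 'val' alongside the byte-sized fields: one 64-bit load or store moves all
 * seven cached perf values at once, so a reader never sees a torn update.
 * The helper names below are hypothetical.
 */
static inline union perf_cached perf_cached_snapshot(const union perf_cached *src)
{
	union perf_cached perf;

	perf.val = READ_ONCE(src->val);	/* one consistent 64-bit load */
	return perf;
}

static inline void perf_cached_publish(union perf_cached *dst, union perf_cached perf)
{
	WRITE_ONCE(dst->val, perf.val);	/* publish all fields at once */
}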
+
+/**
+ * struct amd_aperf_mperf
+ * @aperf: actual performance frequency clock count
+ * @mperf: maximum performance frequency clock count
+ * @tsc: time stamp counter
+ */
+struct amd_aperf_mperf {
+ u64 aperf;
+ u64 mperf;
+ u64 tsc;
+};
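/*
 * Illustrative use (assumed from the APERF/MPERF convention, not from this
 * patch): the effective frequency over a sampling window follows from the
 * counter deltas between two snapshots:
 *
 *	freq = max_freq * (cur.aperf - prev.aperf) /
 *			  (cur.mperf - prev.mperf);
 *
 * The tsc delta bounds the window so stale samples can be discarded.
 */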
+
+/**
+ * struct amd_cpudata - private CPU data for AMD P-State
+ * @cpu: CPU number
+ * @req: constraint request to apply
+ * @cppc_req_cached: cached performance request hints
+ * @perf: cached performance-related data
+ * @prefcore_ranking: the preferred core ranking; a higher value indicates a
+ * higher priority.
+ * @min_limit_freq: Cached value of policy->min (in khz)
+ * @max_limit_freq: Cached value of policy->max (in khz)
+ * @nominal_freq: the frequency (in khz) that maps to nominal_perf
+ * @lowest_nonlinear_freq: the frequency (in khz) that maps to lowest_nonlinear_perf
+ * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
+ * @prev: Last Aperf/Mperf/tsc count value read from register
+ * @freq: current cpu frequency value (in khz)
+ * @boost_supported: check whether the Processor or SBIOS supports boost mode
+ * @hw_prefcore: check whether HW supports the preferred core feature.
+ * Only when both hw_prefcore and the early prefcore param are true does
+ * the AMD P-State driver support the preferred core feature.
+ * @policy: Cpufreq policy value
+ * @suspended: Whether the driver is suspended
+ * @epp_default: Cached default CPPC energy-performance preference value
+ *
+ * The amd_cpudata structure holds the key private data for each CPU thread
+ * in AMD P-State, and represents all the attributes and goals that AMD
+ * P-State requests at runtime.
+ */
+struct amd_cpudata {
+ int cpu;
+
+ struct freq_qos_request req[2];
+ u64 cppc_req_cached;
+
+ union perf_cached perf;
+
+ u8 prefcore_ranking;
+ u32 min_limit_freq;
+ u32 max_limit_freq;
+ u32 nominal_freq;
+ u32 lowest_nonlinear_freq;
+
+ struct amd_aperf_mperf cur;
+ struct amd_aperf_mperf prev;
+
+ u64 freq;
+ bool boost_supported;
+ bool hw_prefcore;
+
+ /* EPP feature related attributes */
+ u32 policy;
+ bool suspended;
+ u8 epp_default;
+};
+
+/*
+ * enum amd_pstate_mode - driver working mode of amd pstate
+ */
+enum amd_pstate_mode {
+ AMD_PSTATE_UNDEFINED = 0,
+ AMD_PSTATE_DISABLE,
+ AMD_PSTATE_PASSIVE,
+ AMD_PSTATE_ACTIVE,
+ AMD_PSTATE_GUIDED,
+ AMD_PSTATE_MAX,
+};
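/*
 * Illustrative mapping (a sketch, not from this patch) of what
 * amd_pstate_get_mode_string() is expected to return for each mode:
 *
 *	static const char * const amd_pstate_mode_string[] = {
 *		[AMD_PSTATE_UNDEFINED]	= "undefined",
 *		[AMD_PSTATE_DISABLE]	= "disable",
 *		[AMD_PSTATE_PASSIVE]	= "passive",
 *		[AMD_PSTATE_ACTIVE]	= "active",
 *		[AMD_PSTATE_GUIDED]	= "guided",
 *	};
 */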
+const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode);
+int amd_pstate_get_status(void);
+int amd_pstate_update_status(const char *buf, size_t size);
+
+#endif /* _LINUX_AMD_PSTATE_H */
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 59b19b9975e8..13fed4b9e02b 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -129,7 +129,7 @@ static int __init amd_freq_sensitivity_init(void)
pci_dev_put(pcidev);
}
- if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+ if (rdmsrq_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
return -ENODEV;
if (!(val >> CLASS_CODE_SHIFT))
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 021f423705e1..b1d29b7af232 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -22,11 +22,14 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
-#define APPLE_DVFS_CMD 0x20
-#define APPLE_DVFS_CMD_BUSY BIT(31)
-#define APPLE_DVFS_CMD_SET BIT(25)
-#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12)
-#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+#define APPLE_DVFS_CMD 0x20
+#define APPLE_DVFS_CMD_BUSY BIT(31)
+#define APPLE_DVFS_CMD_SET BIT(25)
+#define APPLE_DVFS_CMD_PS1_S5L8960X GENMASK(24, 22)
+#define APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT 22
+#define APPLE_DVFS_CMD_PS2 GENMASK(15, 12)
+#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+#define APPLE_DVFS_CMD_PS1_SHIFT 0
/* Same timebase as CPU counter (24MHz) */
#define APPLE_DVFS_LAST_CHG_TIME 0x38
@@ -35,6 +38,9 @@
* Apple ran out of bits and had to shift this in T8112...
*/
#define APPLE_DVFS_STATUS 0x50
+#define APPLE_DVFS_STATUS_CUR_PS_S5L8960X GENMASK(5, 3)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X 3
+#define APPLE_DVFS_STATUS_TGT_PS_S5L8960X GENMASK(2, 0)
#define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4)
#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4
#define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0)
@@ -52,12 +58,15 @@
#define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16)
#define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0)
-#define APPLE_DVFS_TRANSITION_TIMEOUT 100
+#define APPLE_DVFS_TRANSITION_TIMEOUT 400
struct apple_soc_cpufreq_info {
+ bool has_ps2;
u64 max_pstate;
u64 cur_pstate_mask;
u64 cur_pstate_shift;
+ u64 ps1_mask;
+ u64 ps1_shift;
};
struct apple_cpu_priv {
@@ -68,24 +77,46 @@ struct apple_cpu_priv {
static struct cpufreq_driver apple_soc_cpufreq_driver;
+static const struct apple_soc_cpufreq_info soc_s5l8960x_info = {
+ .has_ps2 = false,
+ .max_pstate = 7,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_S5L8960X,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X,
+ .ps1_mask = APPLE_DVFS_CMD_PS1_S5L8960X,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT,
+};
+
static const struct apple_soc_cpufreq_info soc_t8103_info = {
+ .has_ps2 = true,
.max_pstate = 15,
.cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103,
.cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103,
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct apple_soc_cpufreq_info soc_t8112_info = {
+ .has_ps2 = false,
.max_pstate = 31,
.cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112,
.cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112,
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct apple_soc_cpufreq_info soc_default_info = {
+ .has_ps2 = false,
.max_pstate = 15,
.cur_pstate_mask = 0, /* fallback */
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
-static const struct of_device_id apple_soc_cpufreq_of_match[] = {
+static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
+ {
+ .compatible = "apple,s5l8960x-cluster-cpufreq",
+ .data = &soc_s5l8960x_info,
+ },
{
.compatible = "apple,t8103-cluster-cpufreq",
.data = &soc_t8103_info,
@@ -103,13 +134,19 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] = {
static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct apple_cpu_priv *priv = policy->driver_data;
+ struct cpufreq_policy *policy;
+ struct apple_cpu_priv *priv;
struct cpufreq_frequency_table *p;
unsigned int pstate;
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+
if (priv->info->cur_pstate_mask) {
- u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
+ u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift;
} else {
@@ -148,9 +185,12 @@ static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy,
return -EIO;
}
- reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2);
- reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate);
- reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ reg &= ~priv->info->ps1_mask;
+ reg |= pstate << priv->info->ps1_shift;
+ if (priv->info->has_ps2) {
+ reg &= ~APPLE_DVFS_CMD_PS2;
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ }
reg |= APPLE_DVFS_CMD_SET;
writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD);
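	/*
	 * Worked example (illustrative): selecting pstate 3 on s5l8960x
	 * places the value in bits [24:22] (ps1_shift == 22), so the
	 * command word gains 3 << 22. SoCs with a PS2 field (t8103)
	 * additionally mirror the pstate into bits [15:12] through
	 * FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate) before the write above.
	 */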
@@ -195,12 +235,6 @@ static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
return 0;
}
-static struct freq_attr *apple_soc_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL, /* Filled in below if boost is enabled */
- NULL,
-};
-
static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, i;
@@ -275,23 +309,13 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = APPLE_DVFS_TRANSITION_TIMEOUT * NSEC_PER_USEC;
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
policy->fast_switch_possible = true;
policy->suspend_freq = freq_table[0].frequency;
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret) {
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- } else {
- apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- apple_soc_cpufreq_driver.boost_enabled = true;
- }
- }
-
return 0;
out_free_cpufreq_table:
@@ -305,7 +329,7 @@ out_iounmap:
return ret;
}
-static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
+static void apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
{
struct apple_cpu_priv *priv = policy->driver_data;
@@ -313,8 +337,6 @@ static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
iounmap(priv->reg_base);
kfree(priv);
-
- return 0;
}
static struct cpufreq_driver apple_soc_cpufreq_driver = {
@@ -328,7 +350,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
.target_index = apple_soc_cpufreq_set_target,
.fast_switch = apple_soc_cpufreq_fast_switch,
.register_em = cpufreq_register_em_with_opp,
- .attr = apple_soc_cpufreq_hw_attr,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index b74289a95a17..0efe403a5980 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -14,10 +14,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
@@ -104,11 +102,7 @@ struct armada_37xx_dvfs {
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
- /*
- * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
- * unstable because we do not know how to configure it properly.
- */
- /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
+ {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
@@ -271,7 +265,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
*/
target_vm = avs_map[l0_vdd_min] - 100;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
/*
@@ -279,7 +273,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
* be larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 150;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
/*
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 8afefdea4d80..d96c1718f7f8 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -47,7 +47,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
{
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
@@ -57,7 +57,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
continue;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_warn("Cannot get clock for CPU %d\n", cpu);
} else {
@@ -103,7 +103,7 @@ static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
{
int opps_index, nb_cpus = num_possible_cpus();
- for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) {
+ for (opps_index = 0 ; opps_index < nb_cpus; opps_index++) {
int i;
/* If cpu_dev is NULL then we reached the end of the array */
@@ -132,7 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
int ret = 0, opps_index = 0, cpu, nb_cpus;
struct freq_table *freq_tables;
struct device_node *node;
- struct cpumask cpus;
+ static struct cpumask cpus, shared_cpus;
node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match,
NULL);
@@ -154,7 +154,6 @@ static int __init armada_8k_cpufreq_init(void)
* divisions of it).
*/
for_each_cpu(cpu, &cpus) {
- struct cpumask shared_cpus;
struct device *cpu_dev;
struct clk *clk;
@@ -165,7 +164,7 @@ static int __init armada_8k_cpufreq_init(void)
continue;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_err("Cannot get clock for CPU %d\n", cpu);
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
index 39221a9a187a..36051880640b 100644
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -121,11 +121,9 @@ static int bmips_cpufreq_target_index(struct cpufreq_policy *policy,
return 0;
}
-static int bmips_cpufreq_exit(struct cpufreq_policy *policy)
+static void bmips_cpufreq_exit(struct cpufreq_policy *policy)
{
kfree(policy->freq_table);
-
- return 0;
}
static int bmips_cpufreq_init(struct cpufreq_policy *policy)
@@ -152,7 +150,6 @@ static struct cpufreq_driver bmips_cpufreq_driver = {
.get = bmips_cpufreq_get,
.init = bmips_cpufreq_init,
.exit = bmips_cpufreq_exit,
- .attr = cpufreq_generic_attr,
.name = BMIPS_CPUFREQ_PREFIX,
};
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index ffea6402189d..71450cca8e9f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -434,7 +434,11 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
if (ret)
return ERR_PTR(ret);
- table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table),
+ /*
+ * We allocate space for the 5 supported AVS P-states, plus an
+ * extra slot for the terminating element.
+ */
+ table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table),
GFP_KERNEL);
if (!table)
return ERR_PTR(-ENOMEM);
@@ -470,16 +474,19 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
rc = brcm_avs_get_pmap(priv, NULL);
magic = readl(priv->base + AVS_MBOX_MAGIC);
- return (magic == AVS_FIRMWARE_MAGIC) && ((rc != -ENOTSUPP) ||
- (rc != -EINVAL));
+ return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
+ (rc != -EINVAL);
}
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct private_data *priv = policy->driver_data;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ struct private_data *priv;
- cpufreq_cpu_put(policy);
+ if (!policy)
+ return 0;
+
+ priv = policy->driver_data;
return brcm_avs_get_frequency(priv->base);
}
@@ -711,7 +718,6 @@ cpufreq_freq_attr_ro(brcm_avs_voltage);
cpufreq_freq_attr_ro(brcm_avs_frequency);
static struct freq_attr *brcm_avs_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&brcm_avs_pstate,
&brcm_avs_mode,
&brcm_avs_pmap,
@@ -749,17 +755,15 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+static void brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&brcm_avs_driver);
brcm_avs_prepare_uninit(pdev);
-
- return 0;
}
static const struct of_device_id brcm_avs_cpufreq_match[] = {
- { .compatible = BRCM_AVS_CPU_DATA },
+ { .compatible = "brcm,avs-cpu-data-mem" },
{ }
};
MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 022e3555407c..9eac77c4f294 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -16,60 +16,25 @@
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
-#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <uapi/linux/sched/types.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <acpi/cppc_acpi.h>
-/* Minimum struct length needed for the DMI processor entry we want */
-#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
-
-/* Offset in the DMI processor structure for the max frequency */
-#define DMI_PROCESSOR_MAX_SPEED 0x14
-
-/*
- * This list contains information parsed from per CPU ACPI _CPC and _PSD
- * structures: e.g. the highest and lowest supported performance, capabilities,
- * desired performance, level requested etc. Depending on the share_type, not
- * all CPUs will have an entry in the list.
- */
-static LIST_HEAD(cpu_data_list);
-
-static bool boost_supported;
-
-struct cppc_workaround_oem_info {
- char oem_id[ACPI_OEM_ID_SIZE + 1];
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
- u32 oem_revision;
-};
-
-static struct cppc_workaround_oem_info wa_info[] = {
- {
- .oem_id = "HISI ",
- .oem_table_id = "HIP07 ",
- .oem_revision = 0,
- }, {
- .oem_id = "HISI ",
- .oem_table_id = "HIP08 ",
- .oem_revision = 0,
- }
-};
-
static struct cpufreq_driver cppc_cpufreq_driver;
+#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
static enum {
FIE_UNSET = -1,
FIE_ENABLED,
FIE_DISABLED
} fie_disabled = FIE_UNSET;
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
module_param(fie_disabled, int, 0444);
MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)");
@@ -85,9 +50,7 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
/**
@@ -123,8 +86,10 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
return;
}
- perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
- &fb_ctrs);
+ perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
+ if (!perf)
+ return;
+
cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
perf <<= SCHED_CAPACITY_SHIFT;
@@ -177,16 +142,15 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
- if (ret) {
- pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
- __func__, cpu, ret);
- /*
- * Don't abort if the CPU was offline while the driver
- * was getting registered.
- */
- if (cpu_online(cpu))
- return;
+ /*
+ * Don't abort if the CPU was offline while the driver was
+ * being registered.
+ */
+ if (ret && cpu_online(cpu)) {
+ pr_debug("%s: failed to read perf counters for cpu:%d: %d\n",
+ __func__, cpu, ret);
+ return;
}
}
@@ -231,9 +195,9 @@ static void __init cppc_freq_invariance_init(void)
* Fake (unused) bandwidth; workaround to "fix"
* priority inheritance.
*/
- .sched_runtime = 1000000,
- .sched_deadline = 10000000,
- .sched_period = 10000000,
+ .sched_runtime = NSEC_PER_MSEC,
+ .sched_deadline = 10 * NSEC_PER_MSEC,
+ .sched_period = 10 * NSEC_PER_MSEC,
};
int ret;
@@ -248,16 +212,20 @@ static void __init cppc_freq_invariance_init(void)
if (fie_disabled)
return;
- kworker_fie = kthread_create_worker(0, "cppc_fie");
- if (IS_ERR(kworker_fie))
+ kworker_fie = kthread_run_worker(0, "cppc_fie");
+ if (IS_ERR(kworker_fie)) {
+ pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
+ PTR_ERR(kworker_fie));
+ fie_disabled = FIE_DISABLED;
return;
+ }
ret = sched_setattr_nocheck(kworker_fie->task, &attr);
if (ret) {
pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
ret);
kthread_destroy_worker(kworker_fie);
- return;
+ fie_disabled = FIE_DISABLED;
}
}
@@ -267,7 +235,6 @@ static void cppc_freq_invariance_exit(void)
return;
kthread_destroy_worker(kworker_fie);
- kworker_fie = NULL;
}
#else
@@ -288,110 +255,17 @@ static inline void cppc_freq_invariance_exit(void)
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-/* Callback function used to retrieve the max frequency from DMI */
-static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
-{
- const u8 *dmi_data = (const u8 *)dm;
- u16 *mhz = (u16 *)private;
-
- if (dm->type == DMI_ENTRY_PROCESSOR &&
- dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
- u16 val = (u16)get_unaligned((const u16 *)
- (dmi_data + DMI_PROCESSOR_MAX_SPEED));
- *mhz = val > *mhz ? val : *mhz;
- }
-}
-
-/* Look up the max frequency in DMI */
-static u64 cppc_get_dmi_max_khz(void)
-{
- u16 mhz = 0;
-
- dmi_walk(cppc_find_dmi_mhz, &mhz);
-
- /*
- * Real stupid fallback value, just in case there is no
- * actual value set.
- */
- mhz = mhz ? mhz : 1;
-
- return (1000 * mhz);
-}
-
-/*
- * If CPPC lowest_freq and nominal_freq registers are exposed then we can
- * use them to convert perf to freq and vice versa. The conversion is
- * extrapolated as an affine function passing by the 2 points:
- * - (Low perf, Low freq)
- * - (Nominal perf, Nominal perf)
- */
-static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
- unsigned int perf)
-{
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- s64 retval, offset = 0;
- static u64 max_khz;
- u64 mul, div;
-
- if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_freq - caps->lowest_freq;
- div = caps->nominal_perf - caps->lowest_perf;
- offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
- } else {
- if (!max_khz)
- max_khz = cppc_get_dmi_max_khz();
- mul = max_khz;
- div = caps->highest_perf;
- }
-
- retval = offset + div64_u64(perf * mul, div);
- if (retval >= 0)
- return retval;
- return 0;
-}
-
-static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
- unsigned int freq)
-{
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- s64 retval, offset = 0;
- static u64 max_khz;
- u64 mul, div;
-
- if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_perf - caps->lowest_perf;
- div = caps->nominal_freq - caps->lowest_freq;
- offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
- } else {
- if (!max_khz)
- max_khz = cppc_get_dmi_max_khz();
- mul = caps->highest_perf;
- div = max_khz;
- }
-
- retval = offset + div64_u64(freq * mul, div);
- if (retval >= 0)
- return retval;
- return 0;
-}
-
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
-
{
struct cppc_cpudata *cpu_data = policy->driver_data;
unsigned int cpu = policy->cpu;
struct cpufreq_freqs freqs;
- u32 desired_perf;
int ret = 0;
- desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
- /* Return if it is exactly the same perf */
- if (desired_perf == cpu_data->perf_ctrls.desired_perf)
- return ret;
-
- cpu_data->perf_ctrls.desired_perf = desired_perf;
+ cpu_data->perf_ctrls.desired_perf =
+ cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
freqs.old = policy->cur;
freqs.new = target_freq;
@@ -414,7 +288,7 @@ static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
u32 desired_perf;
int ret;
- desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+ desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
cpu_data->perf_ctrls.desired_perf = desired_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -433,6 +307,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
return 0;
}
+static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+ int transition_latency_ns = cppc_get_transition_latency(cpu);
+
+ if (transition_latency_ns < 0)
+ return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
+
+ return transition_latency_ns / NSEC_PER_USEC;
+}
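/*
 * Example (illustrative): cppc_get_transition_latency() returning 500000 ns
 * yields a 500 us transition delay, while a negative return (latency
 * unknown) falls back to CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS scaled down
 * to microseconds.
 */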
+
/*
* The PCC subspace describes the rate at which platform can accept commands
* on the shared PCC channel (including READs which do not count towards freq
@@ -455,19 +339,18 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
return 10000;
}
}
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#endif
#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
static DEFINE_PER_CPU(unsigned int, efficiency_class);
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
#define CPPC_EM_CAP_STEP (20)
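/*
 * Worked example (illustrative): with arch_scale_cpu_capacity() == 1024
 * and CPPC_EM_CAP_STEP == 20, the energy model is registered with
 * 1024 / 20 = 51 artificial performance states for that CPU.
 */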
@@ -517,6 +400,9 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
struct cppc_cpudata *cpu_data;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ if (!policy)
+ return -EINVAL;
+
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
@@ -527,7 +413,7 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
min_step = min_cap / CPPC_EM_CAP_STEP;
max_step = max_cap / CPPC_EM_CAP_STEP;
- perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ perf_prev = cppc_khz_to_perf(perf_caps, *KHz);
step = perf_prev / perf_step;
if (step > max_step)
@@ -547,8 +433,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
perf = step * perf_step;
}
- *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
- perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ *KHz = cppc_perf_to_khz(perf_caps, perf);
+ perf_check = cppc_khz_to_perf(perf_caps, *KHz);
step_check = perf_check / perf_step;
/*
@@ -558,8 +444,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
*/
while ((*KHz == prev_freq) || (step_check != step)) {
perf++;
- *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
- perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ *KHz = cppc_perf_to_khz(perf_caps, perf);
+ perf_check = cppc_khz_to_perf(perf_caps, *KHz);
step_check = perf_check / perf_step;
}
@@ -584,11 +470,14 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
int step;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ if (!policy)
+ return -EINVAL;
+
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
- perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
+ perf_prev = cppc_khz_to_perf(perf_caps, KHz);
perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
step = perf_prev / perf_step;
@@ -597,7 +486,19 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
return 0;
}
-static int populate_efficiency_class(void)
+static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct cppc_cpudata *cpu_data;
+ struct em_data_callback em_cb =
+ EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
+
+ cpu_data = policy->driver_data;
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu),
+ get_perf_level_count(policy), &em_cb,
+ cpu_data->shared_cpu_map, 0);
+}
+
+static void populate_efficiency_class(void)
{
struct acpi_madt_generic_interrupt *gicc;
DECLARE_BITMAP(used_classes, 256) = {};
@@ -612,7 +513,7 @@ static int populate_efficiency_class(void)
if (bitmap_weight(used_classes, 256) <= 1) {
pr_debug("Efficiency classes are all equal (=%d). "
"No EM registered", class);
- return -EINVAL;
+ return;
}
/*
@@ -629,26 +530,11 @@ static int populate_efficiency_class(void)
index++;
}
cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;
-
- return 0;
-}
-
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
-{
- struct cppc_cpudata *cpu_data;
- struct em_data_callback em_cb =
- EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
-
- cpu_data = policy->driver_data;
- em_dev_register_perf_domain(get_cpu_device(policy->cpu),
- get_perf_level_count(policy), &em_cb,
- cpu_data->shared_cpu_map, 0);
}
#else
-static int populate_efficiency_class(void)
+static void populate_efficiency_class(void)
{
- return 0;
}
#endif
@@ -676,12 +562,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
goto free_mask;
}
- /* Convert the lowest and nominal freq from MHz to KHz */
- cpu_data->perf_caps.lowest_freq *= 1000;
- cpu_data->perf_caps.nominal_freq *= 1000;
-
- list_add(&cpu_data->node, &cpu_data_list);
-
return cpu_data;
free_mask:
@@ -696,7 +576,6 @@ static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
- list_del(&cpu_data->node);
free_cpumask_var(cpu_data->shared_cpu_map);
kfree(cpu_data);
policy->driver_data = NULL;
@@ -721,20 +600,17 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* Set min to lowest nonlinear perf to avoid any efficiency penalty (see
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
- policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->lowest_nonlinear_perf);
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
+ policy->max = cppc_perf_to_khz(caps, policy->boost_enabled ?
+ caps->highest_perf : caps->nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
* available if userspace wants to use any perf between lowest & lowest
* nonlinear perf
*/
- policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->lowest_perf);
- policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
+ policy->cpuinfo.max_freq = policy->max;
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
@@ -767,10 +643,10 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* is supported.
*/
if (caps->highest_perf > caps->nominal_perf)
- boost_supported = true;
+ policy->boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
- policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
+ policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -788,7 +664,7 @@ out:
return ret;
}
-static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
@@ -805,7 +681,6 @@ static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
caps->lowest_perf, cpu, ret);
cppc_cpufreq_put_cpu_data(policy);
- return 0;
}
static inline u64 get_delta(u64 t1, u64 t0)
@@ -816,8 +691,7 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
@@ -830,37 +704,71 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
delta_delivered = get_delta(fb_ctrs_t1->delivered,
fb_ctrs_t0->delivered);
- /* Check to avoid divide-by zero and invalid delivered_perf */
+ /*
+ * Avoid divide-by-zero and unchanged feedback counters.
+ * Leave it to the callers to handle.
+ */
if (!delta_reference || !delta_delivered)
- return cpu_data->perf_ctrls.desired_perf;
+ return 0;
return (reference_perf * delta_delivered) / delta_reference;
}
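/*
 * Worked example (illustrative): with reference_perf == 100 and the
 * reference/delivered counters advancing by 1000 and 800 ticks between the
 * two samples, the delivered performance evaluates to
 * 100 * 800 / 1000 == 80. A zero delta now returns 0 so that callers can
 * fall back to the desired perf instead of a bogus value.
 */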
+static int cppc_get_perf_ctrs_sample(int cpu,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t1)
+{
+ int ret;
+
+ ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0);
+ if (ret)
+ return ret;
+
+ udelay(2); /* 2usec delay between sampling */
+
+ return cppc_get_perf_ctrs(cpu, fb_ctrs_t1);
+}
+
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
- cpufreq_cpu_put(policy);
+ if (!policy)
+ return 0;
- ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
- if (ret)
- return ret;
+ cpu_data = policy->driver_data;
- udelay(2); /* 2usec delay between sampling */
+ ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
+ if (ret) {
+ if (ret == -EFAULT)
+ /* Any of the associated CPPC regs is 0. */
+ goto out_invalid_counters;
+ else
+ return 0;
+ }
- ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
- if (ret)
- return ret;
+ delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1);
+ if (!delivered_perf)
+ goto out_invalid_counters;
+
+ return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
- delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
- &fb_ctrs_t1);
+out_invalid_counters:
+ /*
+ * Feedback counters could be unchanged or 0 when a CPU enters a
+ * low-power idle state, e.g. clock-gated or power-gated.
+ * Use the desired perf to reflect the frequency. Get the latest
+ * register value first, as some platforms update the actual delivered
+ * perf there; if that fails, resort to the cached desired perf.
+ */
+ if (cppc_get_desired_perf(cpu, &delivered_perf))
+ delivered_perf = cpu_data->perf_ctrls.desired_perf;
- return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
+ return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
}
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
@@ -869,17 +777,10 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
int ret;
- if (!boost_supported) {
- pr_err("BOOST not supported by CPU or firmware\n");
- return -EINVAL;
- }
-
if (state)
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->highest_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
else
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
policy->cpuinfo.max_freq = policy->max;
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
@@ -895,15 +796,124 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
+
+static ssize_t show_auto_select(struct cpufreq_policy *policy, char *buf)
+{
+ bool val;
+ int ret;
+
+ ret = cppc_get_auto_sel(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t store_auto_select(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ bool val;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_auto_sel(policy->cpu, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_auto_act_window(struct cpufreq_policy *policy, char *buf)
+{
+ u64 val;
+ int ret;
+
+ ret = cppc_get_auto_act_window(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_auto_act_window(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ u64 usec;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &usec);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_auto_act_window(policy->cpu, usec);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_energy_performance_preference_val(struct cpufreq_policy *policy, char *buf)
+{
+ u64 val;
+ int ret;
+
+ ret = cppc_get_epp_perf(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_energy_performance_preference_val(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ u64 val;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_epp(policy->cpu, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
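/*
 * Illustrative sysfs usage (paths assume policy0; each attribute reads
 * back "<unsupported>" when the corresponding _CPC register is absent):
 *
 *	cat /sys/devices/system/cpu/cpufreq/policy0/auto_select
 *	echo 1     > /sys/devices/system/cpu/cpufreq/policy0/auto_select
 *	echo 10000 > /sys/devices/system/cpu/cpufreq/policy0/auto_act_window
 */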
+
cpufreq_freq_attr_ro(freqdomain_cpus);
+cpufreq_freq_attr_rw(auto_select);
+cpufreq_freq_attr_rw(auto_act_window);
+cpufreq_freq_attr_rw(energy_performance_preference_val);
static struct freq_attr *cppc_cpufreq_attr[] = {
&freqdomain_cpus,
+ &auto_select,
+ &auto_act_window,
+ &energy_performance_preference_val,
NULL,
};
static struct cpufreq_driver cppc_cpufreq_driver = {
- .flags = CPUFREQ_CONST_LOOPS,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
.get = cppc_cpufreq_get_rate,
@@ -915,52 +925,6 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
.name = "cppc_cpufreq",
};
-/*
- * HISI platform does not support delivered performance counter and
- * reference performance counter. It can calculate the performance using the
- * platform specific mechanism. We reuse the desired performance register to
- * store the real performance calculated by the platform.
- */
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data = policy->driver_data;
- u64 desired_perf;
- int ret;
-
- cpufreq_cpu_put(policy);
-
- ret = cppc_get_desired_perf(cpu, &desired_perf);
- if (ret < 0)
- return -EIO;
-
- return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
-}
-
-static void cppc_check_hisi_workaround(void)
-{
- struct acpi_table_header *tbl;
- acpi_status status = AE_OK;
- int i;
-
- status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
- if (ACPI_FAILURE(status) || !tbl)
- return;
-
- for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
- if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
- !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
- wa_info[i].oem_revision == tbl->oem_revision) {
- /* Overwrite the get() callback */
- cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
- fie_disabled = FIE_DISABLED;
- break;
- }
- }
-
- acpi_put_table(tbl);
-}
-
static int __init cppc_cpufreq_init(void)
{
int ret;
@@ -968,7 +932,6 @@ static int __init cppc_cpufreq_init(void)
if (!acpi_cpc_valid())
return -ENODEV;
- cppc_check_hisi_workaround();
cppc_freq_invariance_init();
populate_efficiency_class();
@@ -979,24 +942,10 @@ static int __init cppc_cpufreq_init(void)
return ret;
}
-static inline void free_cpu_data(void)
-{
- struct cppc_cpudata *iter, *tmp;
-
- list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
- free_cpumask_var(iter->shared_cpu_map);
- list_del(&iter->node);
- kfree(iter);
- }
-
-}
-
static void __exit cppc_cpufreq_exit(void)
{
cpufreq_unregister_driver(&cppc_cpufreq_driver);
cppc_freq_invariance_exit();
-
- free_cpu_data();
}
module_exit(cppc_cpufreq_exit);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index e2b20080de3a..a1d11ecd1ac8 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -87,6 +87,7 @@ static const struct of_device_id allowlist[] __initconst = {
{ .compatible = "st-ericsson,u9540", },
{ .compatible = "starfive,jh7110", },
+ { .compatible = "starfive,jh7110s", },
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap4", },
@@ -103,7 +104,14 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "airoha,an7583", },
+ { .compatible = "airoha,en7581", },
+
+ { .compatible = "allwinner,sun50i-a100" },
{ .compatible = "allwinner,sun50i-h6", },
+ { .compatible = "allwinner,sun50i-h616", },
+ { .compatible = "allwinner,sun50i-h618", },
+ { .compatible = "allwinner,sun50i-h700", },
{ .compatible = "apple,arm-platform", },
@@ -137,27 +145,40 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "nvidia,tegra20", },
{ .compatible = "nvidia,tegra30", },
+ { .compatible = "nvidia,tegra114", },
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
{ .compatible = "nvidia,tegra234", },
{ .compatible = "qcom,apq8096", },
+ { .compatible = "qcom,msm8909", },
{ .compatible = "qcom,msm8996", },
+ { .compatible = "qcom,msm8998", },
+ { .compatible = "qcom,qcm2290", },
+ { .compatible = "qcom,qcm6490", },
{ .compatible = "qcom,qcs404", },
+ { .compatible = "qcom,qdu1000", },
{ .compatible = "qcom,sa8155p" },
{ .compatible = "qcom,sa8540p" },
+ { .compatible = "qcom,sa8775p" },
{ .compatible = "qcom,sc7180", },
{ .compatible = "qcom,sc7280", },
{ .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sc8280xp", },
+ { .compatible = "qcom,sdm670", },
{ .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sdx75", },
{ .compatible = "qcom,sm6115", },
{ .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm6375", },
{ .compatible = "qcom,sm7225", },
+ { .compatible = "qcom,sm7325", },
{ .compatible = "qcom,sm8150", },
{ .compatible = "qcom,sm8250", },
{ .compatible = "qcom,sm8350", },
+ { .compatible = "qcom,sm8450", },
+ { .compatible = "qcom,sm8550", },
+ { .compatible = "qcom,sm8650", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
@@ -169,8 +190,15 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,omap3", },
{ .compatible = "ti,am625", },
{ .compatible = "ti,am62a7", },
+ { .compatible = "ti,am62d2", },
+ { .compatible = "ti,am62p5", },
+ { .compatible = "qcom,ipq5332", },
+ { .compatible = "qcom,ipq5424", },
+ { .compatible = "qcom,ipq6018", },
{ .compatible = "qcom,ipq8064", },
+ { .compatible = "qcom,ipq8074", },
+ { .compatible = "qcom,ipq9574", },
{ .compatible = "qcom,apq8064", },
{ .compatible = "qcom,msm8974", },
{ .compatible = "qcom,msm8960", },
@@ -180,42 +208,31 @@ static const struct of_device_id blocklist[] __initconst = {
static bool __init cpu0_node_has_opp_v2_prop(void)
{
- struct device_node *np = of_cpu_device_node_get(0);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
bool ret = false;
if (of_property_present(np, "operating-points-v2"))
ret = true;
- of_node_put(np);
return ret;
}
static int __init cpufreq_dt_platdev_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
- const struct of_device_id *match;
- const void *data = NULL;
-
- if (!np)
- return -ENODEV;
+ const void *data;
- match = of_match_node(allowlist, np);
- if (match) {
- data = match->data;
+ data = of_machine_get_match_data(allowlist);
+ if (data)
goto create_pdev;
- }
- if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
+ if (cpu0_node_has_opp_v2_prop() && !of_machine_device_match(blocklist))
goto create_pdev;
- of_node_put(np);
return -ENODEV;
create_pdev:
- of_node_put(np);
return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
-1, data,
sizeof(struct cpufreq_dt_platform_data)));
}
core_initcall(cpufreq_dt_platdev_init);
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 4aec4b2a5225..7d5079fd1688 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -36,12 +36,6 @@ struct private_data {
static LIST_HEAD(priv_list);
-static struct freq_attr *cpufreq_dt_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL, /* Extra space for boost-attr if required */
- NULL,
-};
-
static struct private_data *cpufreq_dt_find_data(int cpu)
{
struct private_data *priv;
@@ -68,36 +62,22 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
*/
static const char *find_supply_name(struct device *dev)
{
- struct device_node *np;
- struct property *pp;
+ struct device_node *np __free(device_node) = of_node_get(dev->of_node);
int cpu = dev->id;
- const char *name = NULL;
-
- np = of_node_get(dev->of_node);
/* This must be valid for sure */
if (WARN_ON(!np))
return NULL;
/* Try "cpu0" for older DTs */
- if (!cpu) {
- pp = of_find_property(np, "cpu0-supply", NULL);
- if (pp) {
- name = "cpu0";
- goto node_put;
- }
- }
+ if (!cpu && of_property_present(np, "cpu0-supply"))
+ return "cpu0";
- pp = of_find_property(np, "cpu-supply", NULL);
- if (pp) {
- name = "cpu";
- goto node_put;
- }
+ if (of_property_present(np, "cpu-supply"))
+ return "cpu";
dev_dbg(dev, "no regulator for cpu%d\n", cpu);
-node_put:
- of_node_put(np);
- return name;
+ return NULL;
}
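/*
 * Illustrative device tree fragment (assumed, not from this patch): a CPU
 * node that find_supply_name() would resolve to the "cpu" supply:
 *
 *	cpu0: cpu@0 {
 *		compatible = "arm,cortex-a53";
 *		cpu-supply = <&buck1>;
 *	};
 */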
static int cpufreq_init(struct cpufreq_policy *policy)
@@ -124,7 +104,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
cpumask_copy(policy->cpus, priv->cpus);
policy->driver_data = priv;
@@ -134,21 +114,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
- /* Support turbo/boost mode */
- if (policy_has_boost_freq(policy)) {
- /* This gets disabled by core on driver unregister */
- ret = cpufreq_enable_boost_support();
- if (ret)
- goto out_clk_put;
- cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- }
-
return 0;
-
-out_clk_put:
- clk_put(cpu_clk);
-
- return ret;
}
static int cpufreq_online(struct cpufreq_policy *policy)
@@ -166,10 +132,9 @@ static int cpufreq_offline(struct cpufreq_policy *policy)
return 0;
}
-static int cpufreq_exit(struct cpufreq_policy *policy)
+static void cpufreq_exit(struct cpufreq_policy *policy)
{
clk_put(policy->clk);
- return 0;
}
static struct cpufreq_driver dt_cpufreq_driver = {
@@ -184,7 +149,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.offline = cpufreq_offline,
.register_em = cpufreq_register_em_with_opp,
.name = "cpufreq-dt",
- .attr = cpufreq_dt_attr,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
@@ -208,7 +173,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
if (!priv)
return -ENOMEM;
- if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(cpu, priv->cpus);
@@ -318,7 +283,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
int ret, cpu;
/* Request resources early so we can return in case of -EPROBE_DEFER */
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = dt_cpufreq_early_init(&pdev->dev, cpu);
if (ret)
goto err;
@@ -349,11 +314,10 @@ err:
return ret;
}
-static int dt_cpufreq_remove(struct platform_device *pdev)
+static void dt_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&dt_cpufreq_driver);
dt_cpufreq_release();
- return 0;
}
static struct platform_driver dt_cpufreq_platdrv = {
@@ -365,6 +329,17 @@ static struct platform_driver dt_cpufreq_platdrv = {
};
module_platform_driver(dt_cpufreq_platdrv);
+struct platform_device *cpufreq_dt_pdev_register(struct device *dev)
+{
+ struct platform_device_info cpufreq_dt_devinfo = {};
+
+ cpufreq_dt_devinfo.name = "cpufreq-dt";
+ cpufreq_dt_devinfo.parent = dev;
+
+ return platform_device_register_full(&cpufreq_dt_devinfo);
+}
+EXPORT_SYMBOL_GPL(cpufreq_dt_pdev_register);
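/*
 * Illustrative caller (hypothetical): SoC glue code can register the
 * generic cpufreq-dt device with itself as the parent so the device
 * hierarchy lines up:
 *
 *	pdev = cpufreq_dt_pdev_register(&pdev->dev);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */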
+
MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
diff --git a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h
index 28c8af7ec5ef..fc1889aeb4f1 100644
--- a/drivers/cpufreq/cpufreq-dt.h
+++ b/drivers/cpufreq/cpufreq-dt.h
@@ -22,4 +22,6 @@ struct cpufreq_dt_platform_data {
int (*resume)(struct cpufreq_policy *policy);
};
+struct platform_device *cpufreq_dt_pdev_register(struct device *dev);
+
#endif /* __CPUFREQ_DT_H__ */
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index f7a7bcf6f52e..fbbbe501cf2d 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -145,6 +145,8 @@ static unsigned int nforce2_fsb_read(int bootfsb)
pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
fsb /= 1000000;
+ pci_dev_put(nforce2_sub5);
+
/* Check if PLL register is already set */
pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
@@ -359,11 +361,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int nforce2_cpu_exit(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
static struct cpufreq_driver nforce2_driver = {
.name = "nforce2",
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
@@ -371,7 +368,6 @@ static struct cpufreq_driver nforce2_driver = {
.target = nforce2_target,
.get = nforce2_get,
.init = nforce2_cpu_init,
- .exit = nforce2_cpu_exit,
};
#ifdef MODULE
@@ -432,6 +428,7 @@ static int __init nforce2_init(void)
static void __exit nforce2_exit(void)
{
cpufreq_unregister_driver(&nforce2_driver);
+ pci_dev_put(nforce2_dev);
}
module_init(nforce2_init);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 50bbc969ffe5..4472bb1ec83c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -25,6 +25,7 @@
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
@@ -86,6 +87,8 @@ static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);
+static bool cpufreq_boost_supported(void);
+static int cpufreq_boost_trigger_state(int state);
/*
* Two notifier lists: the "policy" list is involved in the
@@ -106,6 +109,8 @@ void disable_cpufreq(void)
{
off = 1;
}
+EXPORT_SYMBOL_GPL(disable_cpufreq);
+
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
@@ -252,51 +257,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
-/**
- * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
- * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
- */
-void cpufreq_cpu_release(struct cpufreq_policy *policy)
-{
- if (WARN_ON(!policy))
- return;
-
- lockdep_assert_held(&policy->rwsem);
-
- up_write(&policy->rwsem);
-
- cpufreq_cpu_put(policy);
-}
-
-/**
- * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
- * @cpu: CPU to find the policy for.
- *
- * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
- * if the policy returned by it is not NULL, acquire its rwsem for writing.
- * Return the policy if it is active or release it and return NULL otherwise.
- *
- * The policy returned by this function has to be released with the help of
- * cpufreq_cpu_release() in order to release its rwsem and balance its usage
- * counter properly.
- */
-struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
- if (!policy)
- return NULL;
-
- down_write(&policy->rwsem);
-
- if (policy_is_inactive(policy)) {
- cpufreq_cpu_release(policy);
- return NULL;
- }
-
- return policy;
-}
-
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -453,10 +413,12 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
arch_set_freq_scale(policy->related_cpus,
policy->cur,
- policy->cpuinfo.max_freq);
+ arch_scale_freq_ref(policy->cpu));
+ spin_lock(&policy->transition_lock);
policy->transition_ongoing = false;
policy->transition_task = NULL;
+ spin_unlock(&policy->transition_lock);
wake_up(&policy->transition_wait);
}
@@ -531,16 +493,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ unsigned int relation)
{
unsigned int idx;
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (!policy->freq_table)
return target_freq;
- idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+ idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
policy->cached_resolved_idx = idx;
policy->cached_target_freq = target_freq;
return policy->freq_table[idx].frequency;
@@ -560,7 +524,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+ unsigned int min = READ_ONCE(policy->min);
+ unsigned int max = READ_ONCE(policy->max);
+
+ /*
+ * If this function runs in parallel with cpufreq_set_policy(), it may
+ * read policy->min before the update and policy->max after the update
+ * or the other way around, so there is no ordering guarantee.
+ *
+ * Resolve this by always honoring the max (in case it comes from
+ * thermal throttling or similar).
+ */
+ if (unlikely(min > max))
+ min = max;
+
+ return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
}
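/*
 * Illustrative race (assumed numbers): cpufreq_set_policy() moves the
 * limits from [1 GHz, 2 GHz] to [2.5 GHz, 3 GHz]. A concurrent reader may
 * observe min == 2.5 GHz (new) together with max == 2 GHz (old); clamping
 * min down to max keeps the thermally relevant upper bound authoritative.
 */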
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -572,21 +550,11 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
return policy->transition_delay_us;
latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- if (latency) {
- /*
- * For platforms that can change the frequency very fast (< 10
- * us), the above formula gives a decent transition delay. But
- * for platforms where transition_latency is in milliseconds, it
- * ends up giving unrealistic values.
- *
- * Cap the default transition delay to 10 ms, which seems to be
- * a reasonable amount of time after which we should reevaluate
- * the frequency.
- */
- return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
- }
+ if (latency)
+ /* Give 50% breathing room between updates */
+ return latency + (latency >> 1);
- return LATENCY_MULTIPLIER;
+ return USEC_PER_MSEC;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
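/*
 * Example (illustrative): a cpuinfo.transition_latency of 200000 ns gives
 * latency == 200 us, so the default delay becomes 200 + 100 == 300 us;
 * drivers reporting no latency fall back to USEC_PER_MSEC (1 ms).
 */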
@@ -596,31 +564,75 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
static ssize_t show_boost(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+ return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
}
static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
- int ret, enable;
+ bool enable;
- ret = sscanf(buf, "%d", &enable);
- if (ret != 1 || enable < 0 || enable > 1)
+ if (kstrtobool(buf, &enable))
return -EINVAL;
if (cpufreq_boost_trigger_state(enable)) {
pr_err("%s: Cannot %s BOOST!\n",
- __func__, enable ? "enable" : "disable");
+ __func__, str_enable_disable(enable));
return -EINVAL;
}
pr_debug("%s: cpufreq BOOST %s\n",
- __func__, enable ? "enabled" : "disabled");
+ __func__, str_enabled_disabled(enable));
return count;
}
define_one_global_rw(boost);
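Besides the simpler error handling, kstrtobool() also widens the accepted spellings; as far as I recall it takes the usual boolean forms, where the old sscanf("%d") path only took 0 or 1. A sketch:

    bool enable;

    kstrtobool("1", &enable);   /* 0, enable == true */
    kstrtobool("on", &enable);  /* 0, enable == true; rejected by the old code */
    kstrtobool("N", &enable);   /* 0, enable == false */
    kstrtobool("2", &enable);   /* -EINVAL */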
+static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", policy->boost_enabled);
+}
+
+static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
+{
+ int ret;
+
+ if (policy->boost_enabled == enable)
+ return 0;
+
+ policy->boost_enabled = enable;
+
+ ret = cpufreq_driver->set_boost(policy, enable);
+ if (ret)
+ policy->boost_enabled = !policy->boost_enabled;
+
+ return ret;
+}
+
+static ssize_t store_local_boost(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ int ret;
+ bool enable;
+
+ if (kstrtobool(buf, &enable))
+ return -EINVAL;
+
+ if (!cpufreq_driver->boost_enabled)
+ return -EINVAL;
+
+ if (!policy->boost_supported)
+ return -EINVAL;
+
+ ret = policy_set_boost(policy, enable);
+ if (!ret)
+ return count;
+
+ return ret;
+}
+
+static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
+
static struct cpufreq_governor *find_governor(const char *str_governor)
{
struct cpufreq_governor *t;
@@ -652,10 +664,10 @@ unlock:
static unsigned int cpufreq_parse_policy(char *str_governor)
{
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "performance", strlen("performance")))
return CPUFREQ_POLICY_PERFORMANCE;
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
return CPUFREQ_POLICY_POWERSAVE;
return CPUFREQ_POLICY_UNKNOWN;
@@ -691,7 +703,7 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
static ssize_t show_##file_name \
(struct cpufreq_policy *policy, char *buf) \
{ \
- return sprintf(buf, "%u\n", policy->object); \
+ return sysfs_emit(buf, "%u\n", policy->object); \
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
@@ -700,23 +712,31 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
-__weak unsigned int arch_freq_get_on_cpu(int cpu)
+__weak int arch_freq_get_on_cpu(int cpu)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
+{
+ return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
}
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
- unsigned int freq;
+ int freq;
- freq = arch_freq_get_on_cpu(policy->cpu);
- if (freq)
- ret = sprintf(buf, "%u\n", freq);
+ freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
+ ? arch_freq_get_on_cpu(policy->cpu)
+ : 0;
+
+ if (freq > 0)
+ ret = sysfs_emit(buf, "%u\n", freq);
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
- ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
+ ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
else
- ret = sprintf(buf, "%u\n", policy->cur);
+ ret = sysfs_emit(buf, "%u\n", policy->cur);
return ret;
}
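The sprintf()-to-sysfs_emit() churn in these show() callbacks is defensive rather than cosmetic; as I understand the contract:

    /* sysfs_emit(buf, fmt, ...):
     *   - WARNs and returns 0 unless buf is the page-aligned buffer that
     *     sysfs passed to the ->show() callback
     *   - never writes past PAGE_SIZE
     * sysfs_emit_at(buf, at, fmt, ...) is the offset variant used below
     * for the list-style attributes.
     */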
@@ -750,9 +770,22 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
unsigned int cur_freq = __cpufreq_get(policy);
if (cur_freq)
- return sprintf(buf, "%u\n", cur_freq);
+ return sysfs_emit(buf, "%u\n", cur_freq);
- return sprintf(buf, "<unknown>\n");
+ return sysfs_emit(buf, "<unknown>\n");
+}
+
+/*
+ * show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
+ */
+static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int avg_freq = arch_freq_get_on_cpu(policy->cpu);
+
+ if (avg_freq > 0)
+ return sysfs_emit(buf, "%u\n", avg_freq);
+ return avg_freq != 0 ? avg_freq : -EINVAL;
}
/*
@@ -761,12 +794,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
- return sprintf(buf, "powersave\n");
+ return sysfs_emit(buf, "powersave\n");
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
- return sprintf(buf, "performance\n");
+ return sysfs_emit(buf, "performance\n");
else if (policy->governor)
- return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
- policy->governor->name);
+ return sysfs_emit(buf, "%s\n", policy->governor->name);
return -EINVAL;
}
@@ -776,7 +808,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- char str_governor[16];
+ char str_governor[CPUFREQ_NAME_LEN];
int ret;
ret = sscanf(buf, "%15s", str_governor);
@@ -825,7 +857,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
struct cpufreq_governor *t;
if (!has_target()) {
- i += sprintf(buf, "performance powersave");
+ i += sysfs_emit(buf, "performance powersave");
goto out;
}
@@ -834,11 +866,11 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
break;
- i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
+ i += sysfs_emit_at(buf, i, "%s ", t->name);
}
mutex_unlock(&cpufreq_governor_mutex);
out:
- i += sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
return i;
}
@@ -848,7 +880,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
unsigned int cpu;
for_each_cpu(cpu, mask) {
- i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
+ i += sysfs_emit_at(buf, i, "%u ", cpu);
if (i >= (PAGE_SIZE - 5))
break;
}
@@ -856,7 +888,7 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
/* Remove the extra space at the end */
i--;
- i += sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
@@ -882,14 +914,14 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int freq = 0;
- unsigned int ret;
+ int ret;
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
- ret = sscanf(buf, "%u", &freq);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &freq);
+ if (ret)
+ return ret;
policy->governor->store_setspeed(policy, freq);
@@ -899,7 +931,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
if (!policy->governor || !policy->governor->show_setspeed)
- return sprintf(buf, "<unsupported>\n");
+ return sysfs_emit(buf, "<unsupported>\n");
return policy->governor->show_setspeed(policy, buf);
}
@@ -913,11 +945,12 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
int ret;
ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
if (!ret)
- return sprintf(buf, "%u\n", limit);
- return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
+ return sysfs_emit(buf, "%u\n", limit);
+ return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_avg_freq);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
@@ -936,6 +969,7 @@ static struct attribute *cpufreq_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
&cpuinfo_transition_latency.attr,
+ &scaling_cur_freq.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
&affected_cpus.attr,
@@ -955,17 +989,16 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EBUSY;
if (!fattr->show)
return -EIO;
- down_read(&policy->rwsem);
+ guard(cpufreq_policy_read)(policy);
+
if (likely(!policy_is_inactive(policy)))
- ret = fattr->show(policy, buf);
- up_read(&policy->rwsem);
+ return fattr->show(policy, buf);
- return ret;
+ return -EBUSY;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
@@ -973,17 +1006,16 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EBUSY;
if (!fattr->store)
return -EIO;
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
+
if (likely(!policy_is_inactive(policy)))
- ret = fattr->store(policy, buf, count);
- up_write(&policy->rwsem);
+ return fattr->store(policy, buf, count);
- return ret;
+ return -EBUSY;
}
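The guard()/scoped_guard() conversions above rely on lock-guard classes for the policy rwsem, presumably declared in cpufreq.h roughly as follows (a sketch, not the verbatim header):

    DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
                 down_read(&_T->rwsem), up_read(&_T->rwsem))
    DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
                 down_write(&_T->rwsem), up_write(&_T->rwsem))

The unlock half runs automatically when the guard goes out of scope, which is what lets show() and store() return directly instead of funnelling every exit through an up_read()/up_write() label.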
static void cpufreq_sysfs_release(struct kobject *kobj)
@@ -1031,6 +1063,21 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
struct freq_attr **drv_attr;
int ret = 0;
+ /* Attributes that need freq_table */
+ if (policy->freq_table) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_available_freqs.attr);
+ if (ret)
+ return ret;
+
+ if (cpufreq_boost_supported()) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_boost_freqs.attr);
+ if (ret)
+ return ret;
+ }
+ }
+
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while (drv_attr && *drv_attr) {
@@ -1045,9 +1092,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
- ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
- if (ret)
- return ret;
+ if (cpufreq_avg_freq_supported(policy)) {
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
+ if (ret)
+ return ret;
+ }
if (cpufreq_driver->bios_limit) {
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
@@ -1055,6 +1104,12 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
+ if (cpufreq_boost_supported()) {
+ ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1066,7 +1121,8 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
- gov = get_governor(policy->last_governor);
+ if (policy->last_governor[0] != '\0')
+ gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
gov->name, policy->cpu);
@@ -1114,7 +1170,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (cpumask_test_cpu(cpu, policy->cpus))
return 0;
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
+
if (has_target())
cpufreq_stop_governor(policy);
@@ -1125,7 +1182,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
- up_write(&policy->rwsem);
+
return ret;
}
@@ -1145,9 +1202,10 @@ static void handle_update(struct work_struct *work)
container_of(work, struct cpufreq_policy, update);
pr_debug("handle_update for cpu %u called\n", policy->cpu);
- down_write(&policy->rwsem);
+
+ guard(cpufreq_policy_write)(policy);
+
refresh_frequency_limits(policy);
- up_write(&policy->rwsem);
}
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
@@ -1173,11 +1231,11 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
struct kobject *kobj;
struct completion *cmp;
- down_write(&policy->rwsem);
- cpufreq_stats_free_table(policy);
- kobj = &policy->kobj;
- cmp = &policy->kobj_unregister;
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ cpufreq_stats_free_table(policy);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ }
kobject_put(kobj);
/*
@@ -1226,6 +1284,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
goto err_free_real_cpus;
}
+ init_rwsem(&policy->rwsem);
+
freq_constraints_init(&policy->constraints);
policy->nb_min.notifier_call = cpufreq_notifier_min;
@@ -1234,26 +1294,24 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
&policy->nb_min);
if (ret) {
- dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
- ret, cpumask_pr_args(policy->cpus));
+ dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
+ ret, cpu);
goto err_kobj_remove;
}
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
&policy->nb_max);
if (ret) {
- dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
- ret, cpumask_pr_args(policy->cpus));
+ dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
+ ret, cpu);
goto err_min_qos_notifier;
}
INIT_LIST_HEAD(&policy->policy_list);
- init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
INIT_WORK(&policy->update, handle_update);
- policy->cpu = cpu;
return policy;
err_min_qos_notifier:
@@ -1322,35 +1380,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
kfree(policy);
}
-static int cpufreq_online(unsigned int cpu)
+static int cpufreq_policy_online(struct cpufreq_policy *policy,
+ unsigned int cpu, bool new_policy)
{
- struct cpufreq_policy *policy;
- bool new_policy;
unsigned long flags;
unsigned int j;
int ret;
- pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
-
- /* Check if this CPU already has a policy to manage it */
- policy = per_cpu(cpufreq_cpu_data, cpu);
- if (policy) {
- WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
- if (!policy_is_inactive(policy))
- return cpufreq_add_policy_cpu(policy, cpu);
+ guard(cpufreq_policy_write)(policy);
- /* This is the only online CPU for the policy. Start over. */
- new_policy = false;
- down_write(&policy->rwsem);
- policy->cpu = cpu;
- policy->governor = NULL;
- } else {
- new_policy = true;
- policy = cpufreq_policy_alloc(cpu);
- if (!policy)
- return -ENOMEM;
- down_write(&policy->rwsem);
- }
+ policy->cpu = cpu;
+ policy->governor = NULL;
if (!new_policy && cpufreq_driver->online) {
/* Recover policy->cpus using related_cpus */
@@ -1373,7 +1413,7 @@ static int cpufreq_online(unsigned int cpu)
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
- goto out_free_policy;
+ goto out_clear_policy;
}
/*
@@ -1381,9 +1421,12 @@ static int cpufreq_online(unsigned int cpu)
* If there is a problem with its frequency table, take it
* offline and drop it.
*/
- ret = cpufreq_table_validate_and_sort(policy);
- if (ret)
- goto out_offline_policy;
+ if (policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_ASCENDING &&
+ policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_DESCENDING) {
+ ret = cpufreq_table_validate_and_sort(policy);
+ if (ret)
+ goto out_offline_policy;
+ }
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
@@ -1438,6 +1481,10 @@ static int cpufreq_online(unsigned int cpu)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
+ } else {
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ goto out_destroy_policy;
}
if (cpufreq_driver->get && has_target()) {
@@ -1483,7 +1530,7 @@ static int cpufreq_online(unsigned int cpu)
* frequency for longer duration. Hence, a BUG_ON().
*/
BUG_ON(ret);
- pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
+ pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
__func__, policy->cpu, old_freq, policy->cur);
}
}
@@ -1501,7 +1548,7 @@ static int cpufreq_online(unsigned int cpu)
/*
* Register with the energy model before
- * sched_cpufreq_governor_change() is called, which will result
+ * em_rebuild_sched_domains() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model is properly initialized for the policy
* first.
@@ -1520,19 +1567,6 @@ static int cpufreq_online(unsigned int cpu)
goto out_destroy_policy;
}
- up_write(&policy->rwsem);
-
- kobject_uevent(&policy->kobj, KOBJ_ADD);
-
- /* Callback for handling stuff after policy is ready */
- if (cpufreq_driver->ready)
- cpufreq_driver->ready(policy);
-
- if (cpufreq_thermal_control_enabled(cpufreq_driver))
- policy->cdev = of_cpufreq_cooling_register(policy);
-
- pr_debug("initialization complete\n");
-
return 0;
out_destroy_policy:
@@ -1547,14 +1581,72 @@ out_exit_policy:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
-out_free_policy:
+out_clear_policy:
cpumask_clear(policy->cpus);
- up_write(&policy->rwsem);
- cpufreq_policy_free(policy);
return ret;
}
+static int cpufreq_online(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ bool new_policy;
+ int ret;
+
+ pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
+
+ /* Check if this CPU already has a policy to manage it */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy) {
+ WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+ if (!policy_is_inactive(policy))
+ return cpufreq_add_policy_cpu(policy, cpu);
+
+ /* This is the only online CPU for the policy. Start over. */
+ new_policy = false;
+ } else {
+ new_policy = true;
+ policy = cpufreq_policy_alloc(cpu);
+ if (!policy)
+ return -ENOMEM;
+ }
+
+ ret = cpufreq_policy_online(policy, cpu, new_policy);
+ if (ret) {
+ cpufreq_policy_free(policy);
+ return ret;
+ }
+
+ kobject_uevent(&policy->kobj, KOBJ_ADD);
+
+ /* Callback for handling stuff after policy is ready */
+ if (cpufreq_driver->ready)
+ cpufreq_driver->ready(policy);
+
+ /* Register cpufreq cooling only for a new policy */
+ if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
+ policy->cdev = of_cpufreq_cooling_register(policy);
+
+ /*
+ * Let the per-policy boost flag mirror the cpufreq_driver boost during
+ * initialization for a new policy. For an existing policy, maintain the
+ * previous boost value unless global boost is disabled.
+ */
+ if (cpufreq_driver->set_boost && policy->boost_supported &&
+ (new_policy || !cpufreq_boost_enabled())) {
+ ret = policy_set_boost(policy, cpufreq_boost_enabled());
+ if (ret) {
+ /* If set_boost fails, the online operation is not affected */
+ pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
+ str_enable_disable(cpufreq_boost_enabled()));
+ }
+ }
+
+ pr_debug("initialization complete\n");
+
+ return 0;
+}
+
/**
* cpufreq_add_dev - the cpufreq interface for a CPU device.
* @dev: CPU device.
@@ -1606,30 +1698,27 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
return;
}
- if (has_target())
- strncpy(policy->last_governor, policy->governor->name,
+ if (has_target()) {
+ strscpy(policy->last_governor, policy->governor->name,
CPUFREQ_NAME_LEN);
- else
+ cpufreq_exit_governor(policy);
+ } else {
policy->last_policy = policy->policy;
-
- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
- cpufreq_cooling_unregister(policy->cdev);
- policy->cdev = NULL;
}
- if (has_target())
- cpufreq_exit_governor(policy);
-
/*
* Perform the ->offline() during light-weight tear-down, as
* that allows fast recovery when the CPU comes back.
*/
if (cpufreq_driver->offline) {
cpufreq_driver->offline(policy);
- } else if (cpufreq_driver->exit) {
- cpufreq_driver->exit(policy);
- policy->freq_table = NULL;
+ return;
}
+
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+ policy->freq_table = NULL;
}
static int cpufreq_offline(unsigned int cpu)
@@ -1644,11 +1733,10 @@ static int cpufreq_offline(unsigned int cpu)
return 0;
}
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
__cpufreq_offline(cpu, policy);
- up_write(&policy->rwsem);
return 0;
}
@@ -1665,23 +1753,28 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (!policy)
return;
- down_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ if (cpu_online(cpu))
+ __cpufreq_offline(cpu, policy);
- if (cpu_online(cpu))
- __cpufreq_offline(cpu, policy);
+ remove_cpu_dev_symlink(policy, cpu, dev);
- remove_cpu_dev_symlink(policy, cpu, dev);
+ if (!cpumask_empty(policy->real_cpus))
+ return;
- if (!cpumask_empty(policy->real_cpus)) {
- up_write(&policy->rwsem);
- return;
- }
-
- /* We did light-weight exit earlier, do full tear down now */
- if (cpufreq_driver->offline)
- cpufreq_driver->exit(policy);
+ /*
+ * Unregister cpufreq cooling once all the CPUs of the policy
+ * are removed.
+ */
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
- up_write(&policy->rwsem);
+ /* We did light-weight exit earlier, do full tear down now */
+ if (cpufreq_driver->offline && cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+ }
cpufreq_policy_free(policy);
}
@@ -1713,6 +1806,9 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
{
unsigned int new_freq;
+ if (!cpufreq_driver->get)
+ return 0;
+
new_freq = cpufreq_driver->get(policy->cpu);
if (!new_freq)
return 0;
@@ -1752,27 +1848,25 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy;
- unsigned int ret_freq = 0;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
- ret_freq = cpufreq_driver->get(cpu);
+ unsigned int ret_freq = cpufreq_driver->get(cpu);
+
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
return ret_freq;
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_get(cpu);
- if (policy) {
- ret_freq = policy->cur;
- cpufreq_cpu_put(policy);
- }
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->cur;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get);
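The __free(put_cpufreq_policy) annotation used here is the scope-based cleanup counterpart of the guards, presumably defined via DEFINE_FREE() along these lines (a sketch):

    DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *,
                if (_T) cpufreq_cpu_put(_T))

cpufreq_cpu_put() then runs when the pointer goes out of scope, so the early "return policy->cur;" above cannot leak the reference taken by cpufreq_cpu_get().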
@@ -1784,15 +1878,11 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
-
- if (policy) {
- ret_freq = policy->max;
- cpufreq_cpu_put(policy);
- }
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->max;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
@@ -1804,15 +1894,11 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
-
- if (policy) {
- ret_freq = policy->cpuinfo.max_freq;
- cpufreq_cpu_put(policy);
- }
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->cpuinfo.max_freq;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
@@ -1832,19 +1918,13 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
-
- if (policy) {
- down_read(&policy->rwsem);
- if (cpufreq_driver->get)
- ret_freq = __cpufreq_get(policy);
- up_read(&policy->rwsem);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return 0;
- cpufreq_cpu_put(policy);
- }
+ guard(cpufreq_policy_read)(policy);
- return ret_freq;
+ return __cpufreq_get(policy);
}
EXPORT_SYMBOL(cpufreq_get);
@@ -1903,9 +1983,9 @@ void cpufreq_suspend(void)
for_each_active_policy(policy) {
if (has_target()) {
- down_write(&policy->rwsem);
- cpufreq_stop_governor(policy);
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ cpufreq_stop_governor(policy);
+ }
}
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -1943,16 +2023,16 @@ void cpufreq_resume(void)
for_each_active_policy(policy) {
if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
- pr_err("%s: Failed to resume driver: %p\n", __func__,
- policy);
+ pr_err("%s: Failed to resume driver: %s\n", __func__,
+ cpufreq_driver->name);
} else if (has_target()) {
- down_write(&policy->rwsem);
- ret = cpufreq_start_governor(policy);
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ ret = cpufreq_start_governor(policy);
+ }
if (ret)
- pr_err("%s: Failed to start governor for policy: %p\n",
- __func__, policy);
+ pr_err("%s: Failed to start governor for CPU%u's policy\n",
+ __func__, policy->cpu);
}
}
}
@@ -2131,7 +2211,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
policy->cur = freq;
arch_set_freq_scale(policy->related_cpus, freq,
- policy->cpuinfo.max_freq);
+ arch_scale_freq_ref(policy->cpu));
cpufreq_stats_record_transition(policy, freq);
if (trace_cpu_frequency_enabled()) {
@@ -2278,7 +2358,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
- target_freq = __resolve_freq(policy, target_freq, relation);
+ target_freq = __resolve_freq(policy, target_freq, policy->min,
+ policy->max, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -2315,15 +2396,9 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- int ret;
-
- down_write(&policy->rwsem);
-
- ret = __cpufreq_driver_target(policy, target_freq, relation);
+ guard(cpufreq_policy_write)(policy);
- up_write(&policy->rwsem);
-
- return ret;
+ return __cpufreq_driver_target(policy, target_freq, relation);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -2403,8 +2478,7 @@ int cpufreq_start_governor(struct cpufreq_policy *policy)
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
- if (cpufreq_driver->get)
- cpufreq_verify_current_freq(policy, false);
+ cpufreq_verify_current_freq(policy, false);
if (policy->governor->start) {
ret = policy->governor->start(policy);
@@ -2479,7 +2553,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
for_each_inactive_policy(policy) {
if (!strcmp(policy->last_governor, governor->name)) {
policy->governor = NULL;
- strcpy(policy->last_governor, "\0");
+ policy->last_governor[0] = '\0';
}
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2495,30 +2569,39 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
* POLICY INTERFACE *
*********************************************************************/
+DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
+
/**
- * cpufreq_get_policy - get the current cpufreq_policy
- * @policy: struct cpufreq_policy into which the current cpufreq_policy
- * is written
- * @cpu: CPU to find the policy for
+ * cpufreq_update_pressure() - Update cpufreq pressure for CPUs
+ * @policy: cpufreq policy of the CPUs.
*
- * Reads the current cpufreq policy.
+ * Update the value of cpufreq pressure for all @cpus in the policy.
*/
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
+static void cpufreq_update_pressure(struct cpufreq_policy *policy)
{
- struct cpufreq_policy *cpu_policy;
- if (!policy)
- return -EINVAL;
+ unsigned long max_capacity, capped_freq, pressure;
+ u32 max_freq;
+ int cpu;
- cpu_policy = cpufreq_cpu_get(cpu);
- if (!cpu_policy)
- return -EINVAL;
+ cpu = cpumask_first(policy->related_cpus);
+ max_freq = arch_scale_freq_ref(cpu);
+ capped_freq = policy->max;
- memcpy(policy, cpu_policy, sizeof(*policy));
+ /*
+ * Handle boost frequencies properly: they should simply clear the
+ * cpufreq pressure value.
+ */
+ if (max_freq <= capped_freq) {
+ pressure = 0;
+ } else {
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ pressure = max_capacity -
+ mult_frac(max_capacity, capped_freq, max_freq);
+ }
- cpufreq_cpu_put(cpu_policy);
- return 0;
+ for_each_cpu(cpu, policy->related_cpus)
+ WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
}
-EXPORT_SYMBOL(cpufreq_get_policy);
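A worked example of the pressure arithmetic above, with illustrative numbers: take arch_scale_cpu_capacity() == 1024 and an arch_scale_freq_ref() of 2000000 kHz.

    /* policy->max capped at 1500000 kHz:
     *   pressure = 1024 - mult_frac(1024, 1500000, 2000000)
     *            = 1024 - 768 = 256
     * policy->max at or above the 2000000 kHz reference (boost range):
     *   pressure = 0
     */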
/**
* cpufreq_set_policy - Modify cpufreq policy parameters.
@@ -2568,13 +2651,22 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* Resolve policy min/max to available frequencies. This ensures that
* the resolved frequencies neither overshoot the requested maximum
* nor undershoot the requested minimum.
+ *
+ * Avoid storing intermediate values in policy->max or policy->min and
+ * compiler optimizations around them because they may be accessed
+ * concurrently by cpufreq_driver_resolve_freq() during the update.
*/
- policy->min = new_data.min;
- policy->max = new_data.max;
- policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
- policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
+ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
+ new_data.min, new_data.max,
+ CPUFREQ_RELATION_H));
+ new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
+ new_data.max, CPUFREQ_RELATION_L);
+ WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
+
trace_cpu_frequency_limits(policy);
+ cpufreq_update_pressure(policy);
+
policy->cached_target_freq = UINT_MAX;
pr_debug("new min and max freqs are %u - %u kHz\n",
@@ -2609,7 +2701,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
ret = cpufreq_start_governor(policy);
if (!ret) {
pr_debug("governor change\n");
- sched_cpufreq_governor_change(policy, old_gov);
return 0;
}
cpufreq_exit_governor(policy);
@@ -2619,15 +2710,32 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("starting governor %s failed\n", policy->governor->name);
if (old_gov) {
policy->governor = old_gov;
- if (cpufreq_init_governor(policy))
+ if (cpufreq_init_governor(policy)) {
policy->governor = NULL;
- else
- cpufreq_start_governor(policy);
+ } else if (cpufreq_start_governor(policy)) {
+ cpufreq_exit_governor(policy);
+ policy->governor = NULL;
+ }
}
return ret;
}
+static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
+{
+ guard(cpufreq_policy_write)(policy);
+
+ /*
+ * BIOS might change freq behind our back
+ * -> ask driver for current freq and notify governors about a change
+ */
+ if (cpufreq_driver->get && has_target() &&
+ (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
+ return;
+
+ refresh_frequency_limits(policy);
+}
+
/**
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
* @cpu: CPU to re-evaluate the policy for.
@@ -2639,23 +2747,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
- /*
- * BIOS might change freq behind our back
- * -> ask driver for current freq and notify governors about a change
- */
- if (cpufreq_driver->get && has_target() &&
- (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
- goto unlock;
-
- refresh_frequency_limits(policy);
-
-unlock:
- cpufreq_cpu_release(policy);
+ cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
@@ -2664,28 +2760,32 @@ EXPORT_SYMBOL(cpufreq_update_policy);
* @cpu: CPU to update the policy limits for.
*
* Invoke the driver's ->update_limits callback if present or call
- * cpufreq_update_policy() for @cpu.
+ * cpufreq_policy_refresh() for @cpu.
*/
void cpufreq_update_limits(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
+
if (cpufreq_driver->update_limits)
- cpufreq_driver->update_limits(cpu);
+ cpufreq_driver->update_limits(policy);
else
- cpufreq_update_policy(cpu);
+ cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
* BOOST *
*********************************************************************/
-static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
+int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
int ret;
if (!policy->freq_table)
return -ENXIO;
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret) {
pr_err("%s: Policy frequency update failed\n", __func__);
return ret;
@@ -2697,15 +2797,18 @@ static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
return 0;
}
+EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);
-int cpufreq_boost_trigger_state(int state)
+static int cpufreq_boost_trigger_state(int state)
{
struct cpufreq_policy *policy;
unsigned long flags;
int ret = 0;
- if (cpufreq_driver->boost_enabled == state)
- return 0;
+ /*
+ * Don't compare 'cpufreq_driver->boost_enabled' with 'state' and bail out
+ * early; walk all policies so each one syncs with the global boost flag.
+ */
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver->boost_enabled = state;
@@ -2713,7 +2816,10 @@ int cpufreq_boost_trigger_state(int state)
cpus_read_lock();
for_each_active_policy(policy) {
- ret = cpufreq_driver->set_boost(policy, state);
+ if (!policy->boost_supported)
+ continue;
+
+ ret = policy_set_boost(policy, state);
if (ret)
goto err_reset_state;
}
@@ -2729,7 +2835,7 @@ err_reset_state:
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
- __func__, state ? "enable" : "disable");
+ __func__, str_enable_disable(state));
return ret;
}
@@ -2757,22 +2863,7 @@ static void remove_boost_sysfs_file(void)
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
-int cpufreq_enable_boost_support(void)
-{
- if (!cpufreq_driver)
- return -EINVAL;
-
- if (cpufreq_boost_supported())
- return 0;
-
- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
-
- /* This will get removed on driver unregister */
- return create_boost_sysfs_file();
-}
-EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
-
-int cpufreq_boost_enabled(void)
+bool cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
}
@@ -2823,10 +2914,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -EPROBE_DEFER;
if (!driver_data || !driver_data->verify || !driver_data->init ||
- !(driver_data->setpolicy || driver_data->target_index ||
- driver_data->target) ||
- (driver_data->setpolicy && (driver_data->target_index ||
- driver_data->target)) ||
+ (driver_data->target_index && driver_data->target) ||
+ (!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
(!driver_data->online != !driver_data->offline) ||
(driver_data->adjust_perf && !driver_data->fast_switch))
@@ -2846,15 +2935,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- /*
- * Mark support for the scheduler's frequency invariance engine for
- * drivers that implement target(), target_index() or fast_switch().
- */
- if (!cpufreq_driver->setpolicy) {
- static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
- pr_debug("supports frequency invariance");
- }
-
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;
@@ -2864,6 +2944,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_null_driver;
}
+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("cpufreq: supports frequency invariance\n");
+ }
+
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_boost_unreg;
@@ -2891,6 +2980,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
+ if (!cpufreq_driver->setpolicy)
+ static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -2951,10 +3042,38 @@ static int __init cpufreq_core_init(void)
BUG_ON(!cpufreq_global_kobject);
if (!strlen(default_governor))
- strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
+ strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
return 0;
}
+
+static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
+{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
+ return false;
+ }
+
+ return sugov_is_governor(policy);
+}
+
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
+{
+ unsigned int cpu;
+
+ /* Do not attempt EAS if schedutil is not being used. */
+ for_each_cpu(cpu, cpu_mask) {
+ if (!cpufreq_policy_is_good_for_eas(cpu)) {
+ pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
+ cpumask_pr_args(cpu_mask));
+ return false;
+ }
+ }
+
+ return true;
+}
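Intended usage, as I read it; the caller below is paraphrased from the scheduler's perf-domain setup, not quoted:

    /* before building a perf domain for this root domain */
    if (!cpufreq_ready_for_eas(cpu_mask))
            return NULL;    /* EAS requires schedutil on every CPU */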
+
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b6bd0ff35323..cce6a8d113e1 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -152,9 +152,9 @@ static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ if (ret || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
dbs_data->sampling_down_factor = input;
@@ -168,9 +168,9 @@ static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
+ if (ret || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
dbs_data->up_threshold = input;
@@ -184,11 +184,10 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
/* cannot be lower than 1, otherwise freq will not fall */
- if (ret != 1 || input < 1 || input > 100 ||
- input >= dbs_data->up_threshold)
+ if (ret || input < 1 || input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
@@ -202,9 +201,9 @@ static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &input);
+ if (ret)
+ return ret;
if (input > 1)
input = 1;
@@ -227,10 +226,10 @@ static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1)
- return -EINVAL;
+ if (ret)
+ return ret;
if (input > 100)
input = 100;
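One behavioral nuance of these kstrtouint() conversions: overflow is now reported instead of silently wrapping, and where the handlers return ret the precise error code reaches user space. A sketch:

    unsigned int input;
    int ret;

    ret = kstrtouint("4294967296", 0, &input);  /* UINT_MAX + 1 */
    /* ret == -ERANGE; the old sscanf(buf, "%u", ...) path would have
     * quietly truncated the value instead of failing */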
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 85da677c43d6..1a7fcaf39cc9 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
time_elapsed = update_time - j_cdbs->prev_update_time;
j_cdbs->prev_update_time = update_time;
- idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ /*
+ * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
+ * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
+ * off, where idle_time is calculated as the difference between
+ * time elapsed in jiffies and "busy time" obtained from CPU
+ * statistics. If a CPU is 100% busy, the time elapsed and the busy
+ * time should grow by the same amount in two consecutive
+ * samples, but in practice there can be a tiny difference,
+ * making the accumulated idle time decrease sometimes. Hence,
+ * in this case, idle_time should be regarded as 0 in order to
+ * keep the subsequent calculations correct.
+ */
+ if (cur_idle_time > j_cdbs->prev_cpu_idle)
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ else
+ idle_time = 0;
+
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
@@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
* calls, so the previous load value can be used then.
*/
load = j_cdbs->prev_load;
- } else if (unlikely((int)idle_time > 2 * sampling_rate &&
+ } else if (unlikely(idle_time > 2 * sampling_rate &&
j_cdbs->prev_load)) {
/*
* If the CPU had gone completely idle and a task has
@@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
load = j_cdbs->prev_load;
j_cdbs->prev_load = 0;
} else {
- if (time_elapsed >= idle_time) {
+ if (time_elapsed > idle_time)
load = 100 * (time_elapsed - idle_time) / time_elapsed;
- } else {
- /*
- * That can happen if idle_time is returned by
- * get_cpu_idle_time_jiffy(). In that case
- * idle_time is roughly equal to the difference
- * between time_elapsed and "busy time" obtained
- * from CPU statistics. Then, the "busy time"
- * can end up being greater than time_elapsed
- * (for example, if jiffies_64 and the CPU
- * statistics are updated by different CPUs),
- * so idle_time may in fact be negative. That
- * means, though, that the CPU was busy all
- * the time (on the rough average) during the
- * last sampling interval and 100 can be
- * returned as the load.
- */
- load = (int)idle_time < 0 ? 100 : 0;
- }
+ else
+ load = 0;
+
j_cdbs->prev_load = load;
}
- if (unlikely((int)idle_time > 2 * sampling_rate)) {
+ if (unlikely(idle_time > 2 * sampling_rate)) {
unsigned int periods = idle_time / sampling_rate;
if (periods < idle_periods)
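With the underflow clamped earlier in dbs_update(), the load arithmetic above becomes straightforward; for example, a 10-jiffy window with 3 idle jiffies:

    /* load = 100 * (time_elapsed - idle_time) / time_elapsed
     *      = 100 * (10 - 3) / 10 = 70
     * idle_time >= time_elapsed now simply means a fully idle window
     * (load = 0); the removed "(int)idle_time < 0 ? 100 : 0" special
     * case is no longer reachable.
     */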
@@ -439,7 +440,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
ret = gov->init(dbs_data);
if (ret)
- goto free_policy_dbs_info;
+ goto free_dbs_data;
/*
* The sampling interval should not be less than the transition latency
@@ -474,6 +475,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
gov->exit(dbs_data);
+
+free_dbs_data:
kfree(dbs_data);
free_policy_dbs_info:
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c52d19d67557..a6ecc203f7b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -22,7 +22,6 @@
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
-#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (1)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
@@ -31,29 +30,6 @@ static struct od_ops od_ops;
static unsigned int default_powersave_bias;
/*
- * Not all CPUs want IO time to be accounted as busy; this depends on how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (android.com) claims this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) and later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
-#endif
- return 0;
-}
-
-/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_delay_us,
* freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
@@ -77,7 +53,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
return freq_next;
}
- index = cpufreq_frequency_table_target(policy, freq_next, relation);
+ index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
+ policy->max, relation);
freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
@@ -377,7 +354,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
- dbs_data->io_is_busy = should_io_be_busy();
+ dbs_data->io_is_busy = od_should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
index 1af8e5c4b86f..2ca8f1aaf2e3 100644
--- a/drivers/cpufreq/cpufreq_ondemand.h
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -24,3 +24,26 @@ static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *pol
struct od_dbs_tuners {
unsigned int powersave_bias;
};
+
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on
+ * how efficient idling at a higher frequency/voltage is.
+ *
+ * Pavel Machek says this is not so for various generations of AMD and
+ * old Intel systems. Mike Chan (android.com) claims this is also not
+ * true for ARM.
+ *
+ * Because of this, select a known series of Intel CPUs (Family 6 and
+ * later) by default, and leave all others up to the user.
+ */
+static inline bool od_should_io_be_busy(void)
+{
+ return (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO);
+}
+#else
+static inline bool od_should_io_be_busy(void) { return false; }
+#endif
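The single >= compare works because x86_vfm packs vendor, family and model into one integer; macro details here are from memory, so treat this as a sketch:

    /* x86_vfm ~ VFM_MAKE(vendor, family, model), with vendor in the
     * highest bits and model in the lowest, so for a fixed vendor the
     * values order by family first, then model:
     *   x86_vfm >= INTEL_PENTIUM_PRO
     * covers every Family 6 and later Intel part in one test.
     */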
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 55c7ffd37d1c..40a9ff18da06 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
len += sysfs_emit_at(buf, len, " From : To\n");
len += sysfs_emit_at(buf, len, " : ");
for (i = 0; i < stats->state_num; i++) {
- if (len >= PAGE_SIZE)
+ if (len >= PAGE_SIZE - 1)
break;
len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
}
- if (len >= PAGE_SIZE)
- return PAGE_SIZE;
+ if (len >= PAGE_SIZE - 1)
+ return PAGE_SIZE - 1;
len += sysfs_emit_at(buf, len, "\n");
for (i = 0; i < stats->state_num; i++) {
- if (len >= PAGE_SIZE)
+ if (len >= PAGE_SIZE - 1)
break;
len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
for (j = 0; j < stats->state_num; j++) {
- if (len >= PAGE_SIZE)
+ if (len >= PAGE_SIZE - 1)
break;
if (pending)
@@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
len += sysfs_emit_at(buf, len, "%9u ", count);
}
- if (len >= PAGE_SIZE)
+ if (len >= PAGE_SIZE - 1)
break;
len += sysfs_emit_at(buf, len, "\n");
}
- if (len >= PAGE_SIZE) {
+ if (len >= PAGE_SIZE - 1) {
pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
return -EFBIG;
}
@@ -243,7 +243,8 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
/* Find valid-unique entries */
cpufreq_for_each_valid_entry(pos, policy->freq_table)
- if (freq_table_get_index(stats, pos->frequency) == -1)
+ if (policy->freq_table_sorted != CPUFREQ_TABLE_UNSORTED ||
+ freq_table_get_index(stats, pos->frequency) == -1)
stats->freq_table[i++] = pos->frequency;
stats->state_num = i;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 50a4d7846580..77d62152cd38 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -15,8 +15,11 @@
#include <linux/mutex.h>
#include <linux/slab.h>
-static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
-static DEFINE_MUTEX(userspace_mutex);
+struct userspace_policy {
+ unsigned int is_managed;
+ unsigned int setspeed;
+ struct mutex mutex;
+};
/**
* cpufreq_set - set the CPU frequency
@@ -28,19 +31,19 @@ static DEFINE_MUTEX(userspace_mutex);
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
- unsigned int *setspeed = policy->governor_data;
+ struct userspace_policy *userspace = policy->governor_data;
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
- mutex_lock(&userspace_mutex);
- if (!per_cpu(cpu_is_managed, policy->cpu))
+ mutex_lock(&userspace->mutex);
+ if (!userspace->is_managed)
goto err;
- *setspeed = freq;
+ userspace->setspeed = freq;
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
err:
- mutex_unlock(&userspace_mutex);
+ mutex_unlock(&userspace->mutex);
return ret;
}
@@ -51,67 +54,74 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
{
- unsigned int *setspeed;
+ struct userspace_policy *userspace;
- setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
- if (!setspeed)
+ userspace = kzalloc(sizeof(*userspace), GFP_KERNEL);
+ if (!userspace)
return -ENOMEM;
- policy->governor_data = setspeed;
+ mutex_init(&userspace->mutex);
+
+ policy->governor_data = userspace;
return 0;
}
+/*
+ * Any routine that writes to the policy struct holds the policy's "rwsem",
+ * so it is safe to free "governor_data" here.
+ */
static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
{
- mutex_lock(&userspace_mutex);
kfree(policy->governor_data);
policy->governor_data = NULL;
- mutex_unlock(&userspace_mutex);
}
static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
{
- unsigned int *setspeed = policy->governor_data;
+ struct userspace_policy *userspace = policy->governor_data;
BUG_ON(!policy->cur);
pr_debug("started managing cpu %u\n", policy->cpu);
- mutex_lock(&userspace_mutex);
- per_cpu(cpu_is_managed, policy->cpu) = 1;
- *setspeed = policy->cur;
- mutex_unlock(&userspace_mutex);
+ mutex_lock(&userspace->mutex);
+ userspace->is_managed = 1;
+ userspace->setspeed = policy->cur;
+ mutex_unlock(&userspace->mutex);
return 0;
}
static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
{
- unsigned int *setspeed = policy->governor_data;
+ struct userspace_policy *userspace = policy->governor_data;
pr_debug("managing cpu %u stopped\n", policy->cpu);
- mutex_lock(&userspace_mutex);
- per_cpu(cpu_is_managed, policy->cpu) = 0;
- *setspeed = 0;
- mutex_unlock(&userspace_mutex);
+ mutex_lock(&userspace->mutex);
+ userspace->is_managed = 0;
+ userspace->setspeed = 0;
+ mutex_unlock(&userspace->mutex);
}
static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
{
- unsigned int *setspeed = policy->governor_data;
+ struct userspace_policy *userspace = policy->governor_data;
- mutex_lock(&userspace_mutex);
+ mutex_lock(&userspace->mutex);
pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
- policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
-
- if (policy->max < *setspeed)
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > *setspeed)
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ policy->cpu, policy->min, policy->max, policy->cur, userspace->setspeed);
+
+ if (policy->max < userspace->setspeed)
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_H);
+ else if (policy->min > userspace->setspeed)
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_L);
else
- __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, userspace->setspeed,
+ CPUFREQ_RELATION_L);
- mutex_unlock(&userspace_mutex);
+ mutex_unlock(&userspace->mutex);
}
static struct cpufreq_governor cpufreq_gov_userspace = {
@@ -124,6 +134,7 @@ static struct cpufreq_governor cpufreq_gov_userspace = {
.store_setspeed = cpufreq_set,
.show_setspeed = show_speed,
.owner = THIS_MODULE,
+ .flags = CPUFREQ_GOV_STRICT_TARGET,
};
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index ebb3a8102681..2c277eb3795a 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -101,7 +101,6 @@ static struct cpufreq_driver davinci_driver = {
.get = cpufreq_generic_get,
.init = davinci_cpu_init,
.name = "davinci",
- .attr = cpufreq_generic_attr,
};
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
@@ -131,7 +130,7 @@ static int __init davinci_cpufreq_probe(struct platform_device *pdev)
return cpufreq_register_driver(&davinci_driver);
}
-static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
+static void __exit davinci_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&davinci_driver);
@@ -139,8 +138,6 @@ static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
if (cpufreq.asyncclk)
clk_put(cpufreq.asyncclk);
-
- return 0;
}
static struct platform_driver davinci_cpufreq_driver = {
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index ab93bce8ae77..320a0af2266a 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -225,12 +225,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
/* Enable Enhanced PowerSaver */
- rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ rdmsrq(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
- wrmsrl(MSR_IA32_MISC_ENABLE, val);
+ wrmsrq(MSR_IA32_MISC_ENABLE, val);
/* Can be locked at 0 */
- rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ rdmsrq(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
pr_info("Can't enable Enhanced PowerSaver\n");
return -ENODEV;
@@ -360,14 +360,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int eps_cpu_exit(struct cpufreq_policy *policy)
+static void eps_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
/* Bye */
kfree(eps_cpu[cpu]);
eps_cpu[cpu] = NULL;
- return 0;
}
static struct cpufreq_driver eps_driver = {
@@ -377,7 +376,6 @@ static struct cpufreq_driver eps_driver = {
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
- .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 4ce5eb35dc46..fc5a58088b35 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -21,7 +21,6 @@
#include <linux/cpufreq.h>
#include <asm/cpu_device_id.h>
-#include <asm/msr.h>
#include <linux/timex.h>
#include <linux/io.h>
@@ -194,7 +193,6 @@ static struct cpufreq_driver elanfreq_driver = {
.target_index = elanfreq_target,
.init = elanfreq_cpu_init,
.name = "elanfreq",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id elan_id[] = {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index c4d4643b6ca6..7f251daf03ce 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -14,7 +14,7 @@
* FREQUENCY TABLE HELPERS *
*********************************************************************/
-bool policy_has_boost_freq(struct cpufreq_policy *policy)
+static bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
@@ -27,24 +27,22 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
-EXPORT_SYMBOL_GPL(policy_has_boost_freq);
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
- unsigned int freq;
+ unsigned int freq, i;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
- if (!cpufreq_boost_enabled()
+ if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
+ pr_debug("table entry %u: %u kHz\n", i, freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@@ -66,11 +64,10 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy)
{
- struct cpufreq_frequency_table *pos;
- unsigned int freq, next_larger = ~0;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+ unsigned int freq, prev_smaller = 0;
bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
@@ -86,12 +83,12 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
break;
}
- if ((next_larger > freq) && (freq > policy->max))
- next_larger = freq;
+ if ((prev_smaller < freq) && (freq <= policy->max))
+ prev_smaller = freq;
}
if (!found) {
- policy->max = next_larger;
+ policy->max = prev_smaller;
cpufreq_verify_within_cpu_limits(policy);
}
@@ -111,13 +108,13 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
if (!policy->freq_table)
return -ENODEV;
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int target_freq, unsigned int min,
+ unsigned int max, unsigned int relation)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
@@ -129,7 +126,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
};
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *table = policy->freq_table;
- unsigned int freq, diff, i = 0;
+ unsigned int freq, diff, i;
int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
@@ -148,7 +145,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
- if ((freq < policy->min) || (freq > policy->max))
+ if (freq < min || freq > max)
continue;
if (freq == target_freq) {
optimal.driver_data = i;
@@ -194,7 +191,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
}
if (optimal.driver_data > i) {
if (suboptimal.driver_data > i) {
- WARN(1, "Invalid frequency table: %d\n", policy->cpu);
+ WARN(1, "Invalid frequency table: %u\n", policy->cpu);
return 0;
}
@@ -254,7 +251,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- count += sprintf(&buf[count], "%d ", pos->frequency);
+ count += sprintf(&buf[count], "%u ", pos->frequency);
}
count += sprintf(&buf[count], "\n");
@@ -276,7 +273,6 @@ static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
return show_available_freqs(policy, buf, false);
}
cpufreq_attr_available_freq(scaling_available);
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
/*
* scaling_boost_frequencies_show - show available boost frequencies for
@@ -288,13 +284,6 @@ static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
return show_available_freqs(policy, buf, true);
}
cpufreq_attr_available_freq(scaling_boost);
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
-
-struct freq_attr *cpufreq_generic_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
static int set_freq_table_sorted(struct cpufreq_policy *policy)
{
@@ -363,10 +352,14 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
return 0;
}
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret)
return ret;
+ /* Drivers may have set this field already */
+ if (policy_has_boost_freq(policy))
+ policy->boost_supported = true;
+
return set_freq_table_sorted(policy);
}
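
The verify change above flips the fallback direction: when no table entry falls inside the requested [policy->min, policy->max] window, the old code raised policy->max to the next larger table frequency, while the new code lowers it to the largest table frequency at or below the requested max. A worked example with a hypothetical three-entry table:

	/*
	 * Table (kHz): 800000, 1200000, 1800000
	 * Request:     policy->min = 1300000, policy->max = 1500000
	 * No entry lies in [1300000, 1500000], so:
	 *   old: policy->max = next_larger  = 1800000 (limit rounded up)
	 *   new: policy->max = prev_smaller = 1200000 (limit rounded down)
	 * cpufreq_verify_within_cpu_limits() then re-clamps both bounds.
	 */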
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
deleted file mode 100644
index c6bdc455517f..000000000000
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ /dev/null
@@ -1,353 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * This file provides the ACPI based P-state support. This
- * module works with generic cpufreq infrastructure. Most of
- * the code is based on i386 version
- * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
- *
- * Copyright (C) 2005 Intel Corp
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/proc_fs.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <asm/pal.h>
-
-#include <linux/acpi.h>
-#include <acpi/processor.h>
-
-MODULE_AUTHOR("Venkatesh Pallipadi");
-MODULE_DESCRIPTION("ACPI Processor P-States Driver");
-MODULE_LICENSE("GPL");
-
-struct cpufreq_acpi_io {
- struct acpi_processor_performance acpi_data;
- unsigned int resume;
-};
-
-struct cpufreq_acpi_req {
- unsigned int cpu;
- unsigned int state;
-};
-
-static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
-
-static struct cpufreq_driver acpi_cpufreq_driver;
-
-
-static int
-processor_set_pstate (
- u32 value)
-{
- s64 retval;
-
- pr_debug("processor_set_pstate\n");
-
- retval = ia64_pal_set_pstate((u64)value);
-
- if (retval) {
- pr_debug("Failed to set freq to 0x%x, with error 0x%llx\n",
- value, retval);
- return -ENODEV;
- }
- return (int)retval;
-}
-
-
-static int
-processor_get_pstate (
- u32 *value)
-{
- u64 pstate_index = 0;
- s64 retval;
-
- pr_debug("processor_get_pstate\n");
-
- retval = ia64_pal_get_pstate(&pstate_index,
- PAL_GET_PSTATE_TYPE_INSTANT);
- *value = (u32) pstate_index;
-
- if (retval)
- pr_debug("Failed to get current freq with "
- "error 0x%llx, idx 0x%x\n", retval, *value);
-
- return (int)retval;
-}
-
-
-/* To be used only after data->acpi_data is initialized */
-static unsigned
-extract_clock (
- struct cpufreq_acpi_io *data,
- unsigned value)
-{
- unsigned long i;
-
- pr_debug("extract_clock\n");
-
- for (i = 0; i < data->acpi_data.state_count; i++) {
- if (value == data->acpi_data.states[i].status)
- return data->acpi_data.states[i].core_frequency;
- }
- return data->acpi_data.states[i-1].core_frequency;
-}
-
-
-static long
-processor_get_freq (
- void *arg)
-{
- struct cpufreq_acpi_req *req = arg;
- unsigned int cpu = req->cpu;
- struct cpufreq_acpi_io *data = acpi_io_data[cpu];
- u32 value;
- int ret;
-
- pr_debug("processor_get_freq\n");
- if (smp_processor_id() != cpu)
- return -EAGAIN;
-
- /* processor_get_pstate gets the instantaneous frequency */
- ret = processor_get_pstate(&value);
- if (ret) {
- pr_warn("get performance failed with error %d\n", ret);
- return ret;
- }
- return 1000 * extract_clock(data, value);
-}
-
-
-static long
-processor_set_freq (
- void *arg)
-{
- struct cpufreq_acpi_req *req = arg;
- unsigned int cpu = req->cpu;
- struct cpufreq_acpi_io *data = acpi_io_data[cpu];
- int ret, state = req->state;
- u32 value;
-
- pr_debug("processor_set_freq\n");
- if (smp_processor_id() != cpu)
- return -EAGAIN;
-
- if (state == data->acpi_data.state) {
- if (unlikely(data->resume)) {
- pr_debug("Called after resume, resetting to P%d\n", state);
- data->resume = 0;
- } else {
- pr_debug("Already at target state (P%d)\n", state);
- return 0;
- }
- }
-
- pr_debug("Transitioning from P%d to P%d\n",
- data->acpi_data.state, state);
-
- /*
- * First we write the target state's 'control' value to the
- * control_register.
- */
- value = (u32) data->acpi_data.states[state].control;
-
- pr_debug("Transitioning to state: 0x%08x\n", value);
-
- ret = processor_set_pstate(value);
- if (ret) {
- pr_warn("Transition failed with error %d\n", ret);
- return -ENODEV;
- }
-
- data->acpi_data.state = state;
- return 0;
-}
-
-
-static unsigned int
-acpi_cpufreq_get (
- unsigned int cpu)
-{
- struct cpufreq_acpi_req req;
- long ret;
-
- req.cpu = cpu;
- ret = work_on_cpu(cpu, processor_get_freq, &req);
-
- return ret > 0 ? (unsigned int) ret : 0;
-}
-
-
-static int
-acpi_cpufreq_target (
- struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct cpufreq_acpi_req req;
-
- req.cpu = policy->cpu;
- req.state = index;
-
- return work_on_cpu(req.cpu, processor_set_freq, &req);
-}
-
-static int
-acpi_cpufreq_cpu_init (
- struct cpufreq_policy *policy)
-{
- unsigned int i;
- unsigned int cpu = policy->cpu;
- struct cpufreq_acpi_io *data;
- unsigned int result = 0;
- struct cpufreq_frequency_table *freq_table;
-
- pr_debug("acpi_cpufreq_cpu_init\n");
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return (-ENOMEM);
-
- acpi_io_data[cpu] = data;
-
- result = acpi_processor_register_performance(&data->acpi_data, cpu);
-
- if (result)
- goto err_free;
-
- /* capability check */
- if (data->acpi_data.state_count <= 1) {
- pr_debug("No P-States\n");
- result = -ENODEV;
- goto err_unreg;
- }
-
- if ((data->acpi_data.control_register.space_id !=
- ACPI_ADR_SPACE_FIXED_HARDWARE) ||
- (data->acpi_data.status_register.space_id !=
- ACPI_ADR_SPACE_FIXED_HARDWARE)) {
- pr_debug("Unsupported address space [%d, %d]\n",
- (u32) (data->acpi_data.control_register.space_id),
- (u32) (data->acpi_data.status_register.space_id));
- result = -ENODEV;
- goto err_unreg;
- }
-
- /* alloc freq_table */
- freq_table = kcalloc(data->acpi_data.state_count + 1,
- sizeof(*freq_table),
- GFP_KERNEL);
- if (!freq_table) {
- result = -ENOMEM;
- goto err_unreg;
- }
-
- /* detect transition latency */
- policy->cpuinfo.transition_latency = 0;
- for (i=0; i<data->acpi_data.state_count; i++) {
- if ((data->acpi_data.states[i].transition_latency * 1000) >
- policy->cpuinfo.transition_latency) {
- policy->cpuinfo.transition_latency =
- data->acpi_data.states[i].transition_latency * 1000;
- }
- }
-
- /* table init */
- for (i = 0; i <= data->acpi_data.state_count; i++)
- {
- if (i < data->acpi_data.state_count) {
- freq_table[i].frequency =
- data->acpi_data.states[i].core_frequency * 1000;
- } else {
- freq_table[i].frequency = CPUFREQ_TABLE_END;
- }
- }
-
- policy->freq_table = freq_table;
-
- /* notify BIOS that we exist */
- acpi_processor_notify_smm(THIS_MODULE);
-
- pr_info("CPU%u - ACPI performance management activated\n", cpu);
-
- for (i = 0; i < data->acpi_data.state_count; i++)
- pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
- (i == data->acpi_data.state?'*':' '), i,
- (u32) data->acpi_data.states[i].core_frequency,
- (u32) data->acpi_data.states[i].power,
- (u32) data->acpi_data.states[i].transition_latency,
- (u32) data->acpi_data.states[i].bus_master_latency,
- (u32) data->acpi_data.states[i].status,
- (u32) data->acpi_data.states[i].control);
-
- /* the first call to ->target() should result in us actually
- * writing something to the appropriate registers. */
- data->resume = 1;
-
- return (result);
-
- err_unreg:
- acpi_processor_unregister_performance(cpu);
- err_free:
- kfree(data);
- acpi_io_data[cpu] = NULL;
-
- return (result);
-}
-
-
-static int
-acpi_cpufreq_cpu_exit (
- struct cpufreq_policy *policy)
-{
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
- pr_debug("acpi_cpufreq_cpu_exit\n");
-
- if (data) {
- acpi_io_data[policy->cpu] = NULL;
- acpi_processor_unregister_performance(policy->cpu);
- kfree(policy->freq_table);
- kfree(data);
- }
-
- return (0);
-}
-
-
-static struct cpufreq_driver acpi_cpufreq_driver = {
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = acpi_cpufreq_target,
- .get = acpi_cpufreq_get,
- .init = acpi_cpufreq_cpu_init,
- .exit = acpi_cpufreq_cpu_exit,
- .name = "acpi-cpufreq",
- .attr = cpufreq_generic_attr,
-};
-
-
-static int __init
-acpi_cpufreq_init (void)
-{
- pr_debug("acpi_cpufreq_init\n");
-
- return cpufreq_register_driver(&acpi_cpufreq_driver);
-}
-
-
-static void __exit
-acpi_cpufreq_exit (void)
-{
- pr_debug("acpi_cpufreq_exit\n");
-
- cpufreq_unregister_driver(&acpi_cpufreq_driver);
-}
-
-late_initcall(acpi_cpufreq_init);
-module_exit(acpi_cpufreq_exit);
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 535867a7dfdd..1492c92ffc1a 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -172,15 +172,13 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
return 0;
}
-static int imx_cpufreq_dt_remove(struct platform_device *pdev)
+static void imx_cpufreq_dt_remove(struct platform_device *pdev)
{
platform_device_unregister(cpufreq_dt_pdev);
if (!of_machine_is_compatible("fsl,imx7ulp"))
dev_pm_opp_put_supported_hw(cpufreq_opp_token);
else
clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
-
- return 0;
}
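
Like the davinci change earlier in this diff, this follows the tree-wide conversion of platform driver .remove() callbacks from int to void: the driver core ignored the return value anyway, so teardown must simply be unconditional. A minimal sketch of the new shape (names hypothetical):

	static void example_remove(struct platform_device *pdev)
	{
		/* Undo everything probe() set up; no error can be returned. */
	}

	static struct platform_driver example_driver = {
		.remove = example_remove,
	};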
static struct platform_driver imx_cpufreq_dt_driver = {
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 9fb1501033bb..e93697d3edfd 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -14,6 +14,8 @@
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define PU_SOC_VOLTAGE_NORMAL 1250000
#define PU_SOC_VOLTAGE_HIGH 1275000
@@ -205,7 +207,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.init = imx6q_cpufreq_init,
.register_em = cpufreq_register_em_with_opp,
.name = "imx6q-cpufreq",
- .attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
@@ -225,8 +226,6 @@ static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
static int imx6q_opp_check_speed_grading(struct device *dev)
{
- struct device_node *np;
- void __iomem *base;
u32 val;
int ret;
@@ -235,16 +234,11 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
if (ret)
return ret;
} else {
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
- if (!np)
- return -ENOENT;
+ struct regmap *ocotp;
- base = of_iomap(np, 0);
- of_node_put(np);
- if (!base) {
- dev_err(dev, "failed to map ocotp\n");
- return -EFAULT;
- }
+ ocotp = syscon_regmap_lookup_by_compatible("fsl,imx6q-ocotp");
+ if (IS_ERR(ocotp))
+ return -ENOENT;
/*
* SPEED_GRADING[1:0] defines the max speed of ARM:
@@ -254,8 +248,7 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
* 2b'00: 792000000Hz;
* We need to set the max speed of ARM according to fuse map.
*/
- val = readl_relaxed(base + OCOTP_CFG3);
- iounmap(base);
+ regmap_read(ocotp, OCOTP_CFG3, &val);
}
val >>= OCOTP_CFG3_SPEED_SHIFT;
@@ -290,25 +283,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
if (ret)
return ret;
} else {
- struct device_node *np;
- void __iomem *base;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
- if (!np)
- np = of_find_compatible_node(NULL, NULL,
- "fsl,imx6ull-ocotp");
- if (!np)
- return -ENOENT;
+ struct regmap *ocotp;
- base = of_iomap(np, 0);
- of_node_put(np);
- if (!base) {
- dev_err(dev, "failed to map ocotp\n");
- return -EFAULT;
- }
+ ocotp = syscon_regmap_lookup_by_compatible("fsl,imx6ul-ocotp");
+ if (IS_ERR(ocotp))
+ ocotp = syscon_regmap_lookup_by_compatible("fsl,imx6ull-ocotp");
+
+ if (IS_ERR(ocotp))
+ return -ENOENT;
- val = readl_relaxed(base + OCOTP_CFG3);
- iounmap(base);
+ regmap_read(ocotp, OCOTP_CFG3, &val);
}
/*
@@ -327,7 +311,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
imx6x_disable_freq_in_opp(dev, 696000000);
if (of_machine_is_compatible("fsl,imx6ull")) {
- if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
+ if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
imx6x_disable_freq_in_opp(dev, 792000000);
if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
@@ -458,7 +442,7 @@ soc_opp_out:
}
if (of_property_read_u32(np, "clock-latency", &transition_latency))
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
/*
* Calculate the ramp time for max voltage change in the
@@ -519,7 +503,7 @@ put_node:
return ret;
}
-static int imx6q_cpufreq_remove(struct platform_device *pdev)
+static void imx6q_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
@@ -530,8 +514,6 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
regulator_put(soc_reg);
clk_bulk_put(num_clks, clks);
-
- return 0;
}
static struct platform_driver imx6q_cpufreq_platdrv = {
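
The OCOTP conversion above trades a raw of_iomap()/readl_relaxed() sequence for a shared syscon regmap, which removes the manual of_node_put()/iounmap() cleanup and the error-prone early returns. A hedged sketch of the pattern, with the compatible string and register offset as used in the hunks:

	struct regmap *ocotp;
	u32 val;

	ocotp = syscon_regmap_lookup_by_compatible("fsl,imx6q-ocotp");
	if (IS_ERR(ocotp))
		return -ENOENT;		/* as the driver maps lookup failures */

	/* The regmap handles mapping, locking and access width. */
	regmap_read(ocotp, OCOTP_CFG3, &val);
	val >>= OCOTP_CFG3_SPEED_SHIFT;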
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8ca2bce4341a..ec4abe374573 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -16,6 +16,7 @@
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
+#include <linux/sched/smt.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
@@ -25,7 +26,9 @@
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
+#include <linux/bitfield.h>
#include <trace/events/power.h>
+#include <linux/units.h>
#include <asm/cpu.h>
#include <asm/div64.h>
@@ -172,7 +175,6 @@ struct vid_data {
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
- * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -181,7 +183,6 @@ struct vid_data {
struct global_params {
bool no_turbo;
bool turbo_disabled;
- bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
@@ -201,8 +202,6 @@ struct global_params {
* @prev_aperf: Last APERF value read from APERF MSR
* @prev_mperf: Last MPERF value read from MPERF MSR
* @prev_tsc: Last timestamp counter (TSC) value
- * @prev_cummulative_iowait: IO Wait time difference from last and
- * current sample
* @sample: Storage for storing last Sample data
* @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
* @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
@@ -214,13 +213,15 @@ struct global_params {
* @epp_policy: Last saved policy used to set EPP/EPB
* @epp_default: Power on default HWP energy performance
* preference/bias
- * @epp_cached Cached HWP energy-performance preference value
+ * @epp_cached: Cached HWP energy-performance preference value
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
+ * @capacity_perf: Highest perf used for scale invariance
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
+ * @pd_registered: Set when a perf domain is registered for this CPU.
* @hwp_notify_work: workqueue for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
@@ -241,7 +242,6 @@ struct cpudata {
u64 prev_aperf;
u64 prev_mperf;
u64 prev_tsc;
- u64 prev_cummulative_iowait;
struct sample sample;
int32_t min_perf_ratio;
int32_t max_perf_ratio;
@@ -257,9 +257,13 @@ struct cpudata {
u64 hwp_req_cached;
u64 hwp_cap_cached;
u64 last_io_update;
+ unsigned int capacity_perf;
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
+#ifdef CONFIG_ENERGY_MODEL
+ bool pd_registered;
+#endif
struct delayed_work hwp_notify_work;
};
@@ -294,19 +298,25 @@ struct pstate_funcs {
static struct pstate_funcs pstate_funcs __read_mostly;
-static int hwp_active __read_mostly;
-static int hwp_mode_bdw __read_mostly;
-static bool per_cpu_limits __read_mostly;
+static bool hwp_active __ro_after_init;
+static int hwp_mode_bdw __ro_after_init;
+static bool per_cpu_limits __ro_after_init;
+static bool hwp_forced __ro_after_init;
static bool hwp_boost __read_mostly;
-static bool hwp_forced __read_mostly;
+static bool hwp_is_hybrid;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
-#define HYBRID_SCALING_FACTOR 78741
+#define INTEL_PSTATE_CORE_SCALING 100000
+#define HYBRID_SCALING_FACTOR_ADL 78741
+#define HYBRID_SCALING_FACTOR_MTL 80000
+#define HYBRID_SCALING_FACTOR_LNL 86957
+
+static int hybrid_scaling_factor;
static inline int core_get_scaling(void)
{
- return 100000;
+ return INTEL_PSTATE_CORE_SCALING;
}
#ifdef CONFIG_ACPI
@@ -356,15 +366,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
int ret;
ret = cppc_get_perf_caps(cpu, &cppc_perf);
- if (ret)
- return;
-
/*
- * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
- * In this case we can't use CPPC.highest_perf to enable ITMT.
- * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
+ * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0].
+ *
+ * Also, on some systems with overclocking enabled, CPPC.highest_perf is
+ * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT.
+ * Fall back to MSR_HWP_CAPABILITIES then too.
*/
- if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+ if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF)
cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
/*
@@ -411,18 +420,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
static int intel_pstate_cppc_get_scaling(int cpu)
{
struct cppc_perf_caps cppc_perf;
- int ret;
-
- ret = cppc_get_perf_caps(cpu, &cppc_perf);
/*
- * If the nominal frequency and the nominal performance are not
- * zero and the ratio between them is not 100, return the hybrid
- * scaling factor.
+ * Compute the perf-to-frequency scaling factor for the given CPU if
+ * possible; if the capabilities cannot be read or the factor would be
+ * 0, fall back to the core scaling factor.
*/
- if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
- cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
- return HYBRID_SCALING_FACTOR;
+ if (!cppc_get_perf_caps(cpu, &cppc_perf) &&
+ cppc_perf.nominal_perf && cppc_perf.nominal_freq)
+ return div_u64(cppc_perf.nominal_freq * KHZ_PER_MHZ,
+ cppc_perf.nominal_perf);
return core_get_scaling();
}
@@ -526,6 +532,30 @@ static int intel_pstate_cppc_get_scaling(int cpu)
}
#endif /* CONFIG_ACPI_CPPC_LIB */
+static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
+ unsigned int relation)
+{
+ if (freq == cpu->pstate.turbo_freq)
+ return cpu->pstate.turbo_pstate;
+
+ if (freq == cpu->pstate.max_freq)
+ return cpu->pstate.max_pstate;
+
+ switch (relation) {
+ case CPUFREQ_RELATION_H:
+ return freq / cpu->pstate.scaling;
+ case CPUFREQ_RELATION_C:
+ return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
+ }
+
+ return DIV_ROUND_UP(freq, cpu->pstate.scaling);
+}
+
+static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
+{
+ return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
+}
+
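
intel_pstate_freq_to_hwp_rel() above gives frequency-to-P-state conversion proper cpufreq relation semantics: CPUFREQ_RELATION_H floors, CPUFREQ_RELATION_C rounds to the closest level, and everything else (effectively CPUFREQ_RELATION_L) rounds up; exact turbo/max frequencies short-circuit before any division. Worked numbers with a hypothetical scaling factor:

	/*
	 * freq = 2340000 kHz, cpu->pstate.scaling = 100000:
	 *   CPUFREQ_RELATION_H: 2340000 / 100000         = 23 (floor)
	 *   CPUFREQ_RELATION_C: DIV_ROUND_CLOSEST(...)   = 23 (nearest)
	 *   CPUFREQ_RELATION_L: DIV_ROUND_UP(...)        = 24 (ceiling)
	 */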
/**
* intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
* @cpu: Target CPU.
@@ -543,41 +573,44 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
int scaling = cpu->pstate.scaling;
+ int freq;
- pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
- pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
- pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+ pr_debug("CPU%d: PERF_CTL max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+ pr_debug("CPU%d: PERF_CTL turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+ pr_debug("CPU%d: PERF_CTL scaling = %d\n", cpu->cpu, perf_ctl_scaling);
pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+ if (scaling == perf_ctl_scaling)
+ return;
+
+ hwp_is_hybrid = true;
+
cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
perf_ctl_scaling);
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
perf_ctl_scaling);
- cpu->pstate.max_pstate_physical =
- DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
- scaling);
+ freq = perf_ctl_max_phys * perf_ctl_scaling;
+ cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
- cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+ freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+ cpu->pstate.min_freq = freq;
/*
* Cast the min P-state value retrieved via pstate_funcs.get_min() to
* the effective range of HWP performance levels.
*/
- cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
+ cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
}
-static inline void update_turbo_state(void)
+static bool turbo_is_disabled(void)
{
u64 misc_en;
- struct cpudata *cpu;
- cpu = all_cpu_data[0];
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
- global.turbo_disabled =
- (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
- cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
+ rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
+
+ return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}
static int min_perf_pct_min(void)
@@ -589,24 +622,9 @@ static int min_perf_pct_min(void)
(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
-static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return (s16)ret;
-
- return (s16)(epb & 0x0f);
-}
-
static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
- s16 epp;
+ s16 epp = -EOPNOTSUPP;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
@@ -614,40 +632,19 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
* MSR_HWP_REQUEST, so need to read and get EPP.
*/
if (!hwp_req_data) {
- epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
+ epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
&hwp_req_data);
if (epp)
return epp;
}
epp = (hwp_req_data >> 24) & 0xff;
- } else {
- /* When there is no EPP present, HWP uses EPB settings */
- epp = intel_pstate_get_epb(cpu_data);
}
return epp;
}
-static int intel_pstate_set_epb(int cpu, s16 pref)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return ret;
-
- epb = (epb & ~0x0f) | pref;
- wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
-
- return 0;
-}
-
/*
- * EPP/EPB display strings corresponding to EPP index in the
+ * EPP display strings corresponding to EPP index in the
* energy_perf_strings[]
* index String
*-------------------------------------
@@ -739,7 +736,7 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
* function, so it cannot run in parallel with the update below.
*/
WRITE_ONCE(cpu->hwp_req_cached, value);
- ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
if (!ret)
cpu->epp_cached = epp;
@@ -751,7 +748,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
u32 raw_epp)
{
int epp = -EINVAL;
- int ret;
+ int ret = -EOPNOTSUPP;
if (!pref_index)
epp = cpu_data->epp_default;
@@ -771,10 +768,6 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
return -EBUSY;
ret = intel_pstate_set_epp(cpu_data, epp);
- } else {
- if (epp == -EINVAL)
- epp = (pref_index - 1) << 2;
- ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
return ret;
@@ -893,7 +886,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
if (ratio <= 0) {
u64 cap;
- rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
ratio = HWP_GUARANTEED_PERF(cap);
}
@@ -906,18 +899,295 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(base_frequency);
+enum hwp_cpufreq_attr_index {
+ HWP_BASE_FREQUENCY_INDEX = 0,
+ HWP_PERFORMANCE_PREFERENCE_INDEX,
+ HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX,
+ HWP_CPUFREQ_ATTR_COUNT,
+};
+
static struct freq_attr *hwp_cpufreq_attrs[] = {
- &energy_performance_preference,
- &energy_performance_available_preferences,
- &base_frequency,
- NULL,
+ [HWP_BASE_FREQUENCY_INDEX] = &base_frequency,
+ [HWP_PERFORMANCE_PREFERENCE_INDEX] = &energy_performance_preference,
+ [HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] =
+ &energy_performance_available_preferences,
+ [HWP_CPUFREQ_ATTR_COUNT] = NULL,
};
+static u8 hybrid_get_cpu_type(unsigned int cpu)
+{
+ return cpu_data(cpu).topo.intel_type;
+}
+
+static bool no_cas __ro_after_init;
+
+static struct cpudata *hybrid_max_perf_cpu __read_mostly;
+/*
+ * Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata,
+ * and the x86 arch scale-invariance information from concurrent updates.
+ */
+static DEFINE_MUTEX(hybrid_capacity_lock);
+
+#ifdef CONFIG_ENERGY_MODEL
+#define HYBRID_EM_STATE_COUNT 4
+
+static int hybrid_active_power(struct device *dev, unsigned long *power,
+ unsigned long *freq)
+{
+ /*
+ * Create four "states" corresponding to 40%, 60%, 80%, and 100% of the
+ * full capacity.
+ *
+ * For this purpose, return the "frequency" of 2 for the first
+ * performance level and otherwise leave the value set by the caller.
+ */
+ if (!*freq)
+ *freq = 2;
+
+ /* No power information. */
+ *power = EM_MAX_POWER;
+
+ return 0;
+}
+
+static bool hybrid_has_l3(unsigned int cpu)
+{
+ struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
+ unsigned int i;
+
+ if (!cacheinfo)
+ return false;
+
+ for (i = 0; i < cacheinfo->num_leaves; i++) {
+ if (cacheinfo->info_list[i].level == 3)
+ return true;
+ }
+
+ return false;
+}
+
+static int hybrid_get_cost(struct device *dev, unsigned long freq,
+ unsigned long *cost)
+{
+ /* Facilitate load balancing between CPUs of the same type. */
+ *cost = freq;
+ /*
+ * Adjust the cost depending on CPU type.
+ *
+ * The idea is to start loading up LPE-cores before E-cores and start
+ * to populate E-cores when LPE-cores are utilized above 60% of the
+ * capacity. Similarly, P-cores start to be populated when E-cores are
+ * utilized above 60% of the capacity.
+ */
+ if (hybrid_get_cpu_type(dev->id) == INTEL_CPU_TYPE_ATOM) {
+ if (hybrid_has_l3(dev->id)) /* E-core */
+ *cost += 1;
+ } else { /* P-core */
+ *cost += 2;
+ }
+
+ return 0;
+}
+
+static bool hybrid_register_perf_domain(unsigned int cpu)
+{
+ static const struct em_data_callback cb
+ = EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost);
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ struct device *cpu_dev;
+
+ /*
+ * Registering EM perf domains without enabling asymmetric CPU capacity
+ * support is not really useful and one domain should not be registered
+ * more than once.
+ */
+ if (!hybrid_max_perf_cpu || cpudata->pd_registered)
+ return false;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return false;
+
+ if (em_dev_register_pd_no_update(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+ cpumask_of(cpu), false))
+ return false;
+
+ cpudata->pd_registered = true;
+
+ return true;
+}
+
+static void hybrid_register_all_perf_domains(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ hybrid_register_perf_domain(cpu);
+}
+
+static void hybrid_update_perf_domain(struct cpudata *cpu)
+{
+ if (cpu->pd_registered)
+ em_adjust_cpu_capacity(cpu->cpu);
+}
+#else /* !CONFIG_ENERGY_MODEL */
+static inline bool hybrid_register_perf_domain(unsigned int cpu) { return false; }
+static inline void hybrid_register_all_perf_domains(void) {}
+static inline void hybrid_update_perf_domain(struct cpudata *cpu) {}
+#endif /* CONFIG_ENERGY_MODEL */
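
Taken together, hybrid_active_power() and hybrid_get_cost() above register a deliberately power-agnostic four-state energy model whose only purpose is to bias task placement by core type. A worked sketch of the resulting tables, under the assumptions stated in the comments (first abstract frequency point forced to 2, so the four states sit at 40/60/80/100% of capacity):

	/*
	 * Abstract frequency points: 2, 3, 4, 5  ->  40%, 60%, 80%, 100%
	 * Cost per state, per hybrid_get_cost():
	 *   LPE-core (Atom, no L3): freq     -> 2, 3, 4, 5
	 *   E-core   (Atom, L3):    freq + 1 -> 3, 4, 5, 6
	 *   P-core   (non-Atom):    freq + 2 -> 4, 5, 6, 7
	 * At comparable utilization EAS therefore fills LPE-cores first,
	 * then E-cores, then P-cores, matching the staggered fill-up the
	 * hybrid_get_cost() comment describes.
	 */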
+
+static void hybrid_set_cpu_capacity(struct cpudata *cpu)
+{
+ arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
+ hybrid_max_perf_cpu->capacity_perf,
+ cpu->capacity_perf,
+ cpu->pstate.max_pstate_physical);
+ hybrid_update_perf_domain(cpu);
+
+ topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
+
+ pr_debug("CPU%d: capacity perf = %u, base perf = %u, sys max perf = %u\n",
+ cpu->cpu, cpu->capacity_perf, cpu->pstate.max_pstate_physical,
+ hybrid_max_perf_cpu->capacity_perf);
+}
+
+static void hybrid_clear_cpu_capacity(unsigned int cpunum)
+{
+ arch_set_cpu_capacity(cpunum, 1, 1, 1, 1);
+}
+
+static void hybrid_get_capacity_perf(struct cpudata *cpu)
+{
+ if (READ_ONCE(global.no_turbo)) {
+ cpu->capacity_perf = cpu->pstate.max_pstate_physical;
+ return;
+ }
+
+ cpu->capacity_perf = HWP_HIGHEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
+}
+
+static void hybrid_set_capacity_of_cpus(void)
+{
+ int cpunum;
+
+ for_each_online_cpu(cpunum) {
+ struct cpudata *cpu = all_cpu_data[cpunum];
+
+ if (cpu)
+ hybrid_set_cpu_capacity(cpu);
+ }
+}
+
+static void hybrid_update_cpu_capacity_scaling(void)
+{
+ struct cpudata *max_perf_cpu = NULL;
+ unsigned int max_cap_perf = 0;
+ int cpunum;
+
+ for_each_online_cpu(cpunum) {
+ struct cpudata *cpu = all_cpu_data[cpunum];
+
+ if (!cpu)
+ continue;
+
+ /*
+ * During initialization, CPU performance at full capacity needs
+ * to be determined.
+ */
+ if (!hybrid_max_perf_cpu)
+ hybrid_get_capacity_perf(cpu);
+
+ /*
+ * If hybrid_max_perf_cpu is not NULL at this point, it is
+ * being replaced, so don't take it into account when looking
+ * for the new one.
+ */
+ if (cpu == hybrid_max_perf_cpu)
+ continue;
+
+ if (cpu->capacity_perf > max_cap_perf) {
+ max_cap_perf = cpu->capacity_perf;
+ max_perf_cpu = cpu;
+ }
+ }
+
+ if (max_perf_cpu) {
+ hybrid_max_perf_cpu = max_perf_cpu;
+ hybrid_set_capacity_of_cpus();
+ } else {
+ pr_info("Found no CPUs with nonzero maximum performance\n");
+ /* Revert to the flat CPU capacity structure. */
+ for_each_online_cpu(cpunum)
+ hybrid_clear_cpu_capacity(cpunum);
+ }
+}
+
+static void __hybrid_refresh_cpu_capacity_scaling(void)
+{
+ hybrid_max_perf_cpu = NULL;
+ hybrid_update_cpu_capacity_scaling();
+}
+
+static void hybrid_refresh_cpu_capacity_scaling(void)
+{
+ guard(mutex)(&hybrid_capacity_lock);
+
+ __hybrid_refresh_cpu_capacity_scaling();
+ /*
+ * Perf domains are not registered before setting hybrid_max_perf_cpu,
+ * so register them all after setting up CPU capacity scaling.
+ */
+ hybrid_register_all_perf_domains();
+}
+
+static void hybrid_init_cpu_capacity_scaling(bool refresh)
+{
+ /* Bail out if enabling capacity-aware scheduling is prohibited. */
+ if (no_cas)
+ return;
+
+ /*
+ * If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
+ * scaling has been enabled already and the driver is just changing the
+ * operation mode.
+ */
+ if (refresh) {
+ hybrid_refresh_cpu_capacity_scaling();
+ return;
+ }
+
+ /*
+ * On hybrid systems, use asym capacity instead of ITMT, but because
+ * the capacity of SMT threads is not deterministic even approximately,
+ * do not do that when SMT is in use.
+ */
+ if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
+ hybrid_refresh_cpu_capacity_scaling();
+ /*
+ * Disabling ITMT causes sched domains to be rebuilt to disable asym
+ * packing and enable asym capacity and EAS.
+ */
+ sched_clear_itmt_support();
+ }
+}
+
+static bool hybrid_clear_max_perf_cpu(void)
+{
+ bool ret;
+
+ guard(mutex)(&hybrid_capacity_lock);
+
+ ret = !!hybrid_max_perf_cpu;
+ hybrid_max_perf_cpu = NULL;
+
+ return ret;
+}
+
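
hybrid_set_cpu_capacity() above hands each CPU's capacity_perf, the system-wide maximum held by hybrid_max_perf_cpu, and the CPU's max_pstate_physical to the arch scale-invariance code. Assuming the arch scales capacity linearly against the fastest CPU, a worked example:

	/*
	 * hybrid_max_perf_cpu->capacity_perf = 48 (P-core HWP highest perf)
	 * cpu->capacity_perf                 = 30 (an E-core)
	 * Published capacity ~ 30 * 1024 / 48 = 640 of SCHED_CAPACITY_SCALE,
	 * which topology_set_cpu_scale() then exposes to the scheduler.
	 * With global.no_turbo set, hybrid_get_capacity_perf() falls back to
	 * max_pstate_physical, shrinking the spread between core types.
	 */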
static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
- rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(cpu->hwp_cap_cached, cap);
cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
@@ -941,6 +1211,51 @@ static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
}
}
+static void hybrid_update_capacity(struct cpudata *cpu)
+{
+ unsigned int max_cap_perf;
+
+ mutex_lock(&hybrid_capacity_lock);
+
+ if (!hybrid_max_perf_cpu)
+ goto unlock;
+
+ /*
+ * The maximum performance of the CPU may have changed, but assume
+ * that the performance of the other CPUs has not changed.
+ */
+ max_cap_perf = hybrid_max_perf_cpu->capacity_perf;
+
+ intel_pstate_get_hwp_cap(cpu);
+
+ hybrid_get_capacity_perf(cpu);
+ /* Should hybrid_max_perf_cpu be replaced by this CPU? */
+ if (cpu->capacity_perf > max_cap_perf) {
+ hybrid_max_perf_cpu = cpu;
+ hybrid_set_capacity_of_cpus();
+ goto unlock;
+ }
+
+ /* If this CPU is hybrid_max_perf_cpu, should it be replaced? */
+ if (cpu == hybrid_max_perf_cpu && cpu->capacity_perf < max_cap_perf) {
+ hybrid_update_cpu_capacity_scaling();
+ goto unlock;
+ }
+
+ hybrid_set_cpu_capacity(cpu);
+ /*
+ * If the CPU was offline to start with and it is going online for the
+ * first time, a perf domain needs to be registered for it if hybrid
+ * capacity scaling has been enabled already. In that case, sched
+ * domains need to be rebuilt to take the new perf domain into account.
+ */
+ if (hybrid_register_perf_domain(cpu->cpu))
+ em_rebuild_sched_domains();
+
+unlock:
+ mutex_unlock(&hybrid_capacity_lock);
+}
+
static void intel_pstate_hwp_set(unsigned int cpu)
{
struct cpudata *cpu_data = all_cpu_data[cpu];
@@ -954,7 +1269,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
- rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value);
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
@@ -996,12 +1311,11 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
- } else {
- intel_pstate_set_epb(cpu, epp);
}
+
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
- wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
@@ -1048,20 +1362,40 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+
+ mutex_lock(&hybrid_capacity_lock);
+
+ if (!hybrid_max_perf_cpu) {
+ mutex_unlock(&hybrid_capacity_lock);
+
+ return;
+ }
+
+ if (hybrid_max_perf_cpu == cpu)
+ hybrid_update_cpu_capacity_scaling();
+
+ mutex_unlock(&hybrid_capacity_lock);
+
+ /* Reset the capacity of the CPU going offline to the initial value. */
+ hybrid_clear_cpu_capacity(cpu->cpu);
}
#define POWER_CTL_EE_ENABLE 1
#define POWER_CTL_EE_DISABLE 2
+/* Enable bit for Dynamic Efficiency Control (DEC) */
+#define POWER_CTL_DEC_ENABLE 27
+
static int power_ctl_ee_state;
static void set_power_ctl_ee_state(bool input)
{
u64 power_ctl;
- mutex_lock(&intel_pstate_driver_lock);
- rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ guard(mutex)(&intel_pstate_driver_lock);
+
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
if (input) {
power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_ENABLE;
@@ -1069,8 +1403,7 @@ static void set_power_ctl_ee_state(bool input)
power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_DISABLE;
}
- wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
- mutex_unlock(&intel_pstate_driver_lock);
+ wrmsrq(MSR_IA32_POWER_CTL, power_ctl);
}
static void intel_pstate_hwp_enable(struct cpudata *cpudata);
@@ -1078,7 +1411,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata);
static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
intel_pstate_hwp_enable(cpu);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}
static int intel_pstate_suspend(struct cpufreq_policy *policy)
@@ -1129,45 +1462,53 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
-static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
- struct cpufreq_policy *policy)
+static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
+ struct cpudata *cpudata)
{
- policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ guard(cpufreq_policy_write)(policy);
+
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
+
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
refresh_frequency_limits(policy);
}
-static void intel_pstate_update_max_freq(unsigned int cpu)
+static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
- return;
+ return false;
- __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
+ __intel_pstate_update_max_freq(policy, cpudata);
- cpufreq_cpu_release(policy);
+ return true;
}
-static void intel_pstate_update_limits(unsigned int cpu)
+static void intel_pstate_update_limits(struct cpufreq_policy *policy)
{
- mutex_lock(&intel_pstate_driver_lock);
+ struct cpudata *cpudata = all_cpu_data[policy->cpu];
- update_turbo_state();
- /*
- * If turbo has been turned on or off globally, policy limits for
- * all CPUs need to be updated to reflect that.
- */
- if (global.turbo_disabled_mf != global.turbo_disabled) {
- global.turbo_disabled_mf = global.turbo_disabled;
- arch_set_max_freq_ratio(global.turbo_disabled);
- for_each_possible_cpu(cpu)
- intel_pstate_update_max_freq(cpu);
- } else {
- cpufreq_update_policy(cpu);
- }
+ __intel_pstate_update_max_freq(policy, cpudata);
+
+ hybrid_update_capacity(cpudata);
+}
+
+static void intel_pstate_update_limits_for_all(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(all_cpu_data[cpu]);
- mutex_unlock(&intel_pstate_driver_lock);
+ mutex_lock(&hybrid_capacity_lock);
+
+ if (hybrid_max_perf_cpu)
+ __hybrid_refresh_cpu_capacity_scaling();
+
+ mutex_unlock(&hybrid_capacity_lock);
}
/************************** sysfs begin ************************/
@@ -1184,13 +1525,9 @@ static int intel_pstate_update_status(const char *buf, size_t size);
static ssize_t show_status(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- ssize_t ret;
-
- mutex_lock(&intel_pstate_driver_lock);
- ret = intel_pstate_show_status(buf);
- mutex_unlock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- return ret;
+ return intel_pstate_show_status(buf);
}
static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
@@ -1199,11 +1536,13 @@ static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
ret = intel_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&intel_pstate_driver_lock);
+ if (ret < 0)
+ return ret;
- return ret < 0 ? ret : count;
+ return count;
}
static ssize_t show_turbo_pct(struct kobject *kobj,
@@ -1213,12 +1552,10 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
int total, no_turbo, turbo_pct;
uint32_t turbo_fp;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
cpu = all_cpu_data[0];
@@ -1227,8 +1564,6 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
turbo_fp = div_fp(no_turbo, total);
turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
- mutex_unlock(&intel_pstate_driver_lock);
-
return sprintf(buf, "%u\n", turbo_pct);
}
@@ -1238,74 +1573,61 @@ static ssize_t show_num_pstates(struct kobject *kobj,
struct cpudata *cpu;
int total;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
cpu = all_cpu_data[0];
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
- mutex_unlock(&intel_pstate_driver_lock);
-
return sprintf(buf, "%u\n", total);
}
static ssize_t show_no_turbo(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- ssize_t ret;
-
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
-
- update_turbo_state();
- if (global.turbo_disabled)
- ret = sprintf(buf, "%u\n", global.turbo_disabled);
- else
- ret = sprintf(buf, "%u\n", global.no_turbo);
-
- mutex_unlock(&intel_pstate_driver_lock);
- return ret;
+ return sprintf(buf, "%u\n", global.no_turbo);
}
static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
- int ret;
+ bool no_turbo;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
+ if (sscanf(buf, "%u", &input) != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
- mutex_lock(&intel_pstate_limits_lock);
+ no_turbo = !!clamp_t(int, input, 0, 1);
- update_turbo_state();
- if (global.turbo_disabled) {
- pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
- mutex_unlock(&intel_pstate_limits_lock);
- mutex_unlock(&intel_pstate_driver_lock);
- return -EPERM;
+ WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
+ if (global.turbo_disabled && !no_turbo) {
+ pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
+ if (global.no_turbo)
+ return -EPERM;
+
+ no_turbo = 1;
}
- global.no_turbo = clamp_t(int, input, 0, 1);
+ if (no_turbo == global.no_turbo)
+ return count;
+
+ WRITE_ONCE(global.no_turbo, no_turbo);
- if (global.no_turbo) {
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (no_turbo) {
struct cpudata *cpu = all_cpu_data[0];
int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
@@ -1316,49 +1638,46 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
mutex_unlock(&intel_pstate_limits_lock);
- intel_pstate_update_policies();
- arch_set_max_freq_ratio(global.no_turbo);
-
- mutex_unlock(&intel_pstate_driver_lock);
+ intel_pstate_update_limits_for_all();
+ arch_set_max_freq_ratio(no_turbo);
return count;
}
-static void update_qos_request(enum freq_qos_req_type type)
+static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type)
{
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ unsigned int freq = cpudata->pstate.turbo_freq;
struct freq_qos_request *req;
- struct cpufreq_policy *policy;
- int i;
-
- for_each_possible_cpu(i) {
- struct cpudata *cpu = all_cpu_data[i];
- unsigned int freq, perf_pct;
- policy = cpufreq_cpu_get(i);
- if (!policy)
- continue;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
- req = policy->driver_data;
- cpufreq_cpu_put(policy);
+ req = policy->driver_data;
+ if (!req)
+ return;
- if (!req)
- continue;
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
- if (hwp_active)
- intel_pstate_get_hwp_cap(cpu);
+ if (type == FREQ_QOS_MIN) {
+ freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100);
+ } else {
+ req++;
+ freq = (freq * global.max_perf_pct) / 100;
+ }
- if (type == FREQ_QOS_MIN) {
- perf_pct = global.min_perf_pct;
- } else {
- req++;
- perf_pct = global.max_perf_pct;
- }
+ if (freq_qos_update_request(req, freq) < 0)
+ pr_warn("Failed to update freq constraint: CPU%d\n", cpu);
+}
- freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
+static void update_qos_requests(enum freq_qos_req_type type)
+{
+ int i;
- if (freq_qos_update_request(req, freq) < 0)
- pr_warn("Failed to update freq constraint: CPU%d\n", i);
- }
+ for_each_possible_cpu(i)
+ update_cpu_qos_request(i, type);
}
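
update_cpu_qos_request() above recomputes one CPU's frequency QoS constraint from the global percent limits; note the asymmetric rounding (DIV_ROUND_UP for the floor, truncating division for the ceiling) and that the FREQ_QOS_MAX request sits one slot past the min in policy->driver_data (hence req++). Worked numbers, assuming turbo_freq = 3600000 kHz:

	/*
	 * FREQ_QOS_MIN, min_perf_pct = 25:
	 *   DIV_ROUND_UP(3600000 * 25, 100) = 900000 kHz
	 * FREQ_QOS_MAX, max_perf_pct = 66:
	 *   (3600000 * 66) / 100            = 2376000 kHz
	 * A failed freq_qos_update_request() is only logged, not propagated.
	 */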
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
@@ -1371,12 +1690,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
mutex_lock(&intel_pstate_limits_lock);
@@ -1387,9 +1704,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MAX);
-
- mutex_unlock(&intel_pstate_driver_lock);
+ update_qos_requests(FREQ_QOS_MAX);
return count;
}
@@ -1404,12 +1719,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
mutex_lock(&intel_pstate_limits_lock);
@@ -1421,9 +1734,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MIN);
-
- mutex_unlock(&intel_pstate_driver_lock);
+ update_qos_requests(FREQ_QOS_MIN);
return count;
}
@@ -1445,10 +1756,10 @@ static ssize_t store_hwp_dynamic_boost(struct kobject *a,
if (ret)
return ret;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
hwp_boost = !!input;
intel_pstate_update_policies();
- mutex_unlock(&intel_pstate_driver_lock);
return count;
}
@@ -1459,7 +1770,7 @@ static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribut
u64 power_ctl;
int enable;
- rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
return sprintf(buf, "%d\n", !enable);
}
@@ -1596,98 +1907,97 @@ static void intel_pstate_notify_work(struct work_struct *work)
{
struct cpudata *cpudata =
container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
-
- if (policy) {
- intel_pstate_get_hwp_cap(cpudata);
- __intel_pstate_update_max_freq(cpudata, policy);
- cpufreq_cpu_release(policy);
+ if (intel_pstate_update_max_freq(cpudata)) {
+ /*
+ * The driver will not be unregistered while this function is
+ * running, so update the capacity without acquiring the driver
+ * lock.
+ */
+ hybrid_update_capacity(cpudata);
}
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
-static DEFINE_SPINLOCK(hwp_notify_lock);
+static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;
+#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0)
+#define HWP_HIGHEST_PERF_CHANGE_STATUS BIT(3)
+
void notify_hwp_interrupt(void)
{
unsigned int this_cpu = smp_processor_id();
- struct cpudata *cpudata;
+ u64 value, status_mask;
unsigned long flags;
- u64 value;
- if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
return;
- rdmsrl_safe(MSR_HWP_STATUS, &value);
- if (!(value & 0x01))
+ status_mask = HWP_GUARANTEED_PERF_CHANGE_STATUS;
+ if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
+ status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
+
+ rdmsrq_safe(MSR_HWP_STATUS, &value);
+ if (!(value & status_mask))
return;
- spin_lock_irqsave(&hwp_notify_lock, flags);
+ raw_spin_lock_irqsave(&hwp_notify_lock, flags);
if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
goto ack_intr;
- /*
- * Currently we never free all_cpu_data. And we can't reach here
- * without this allocated. But for safety for future changes, added
- * check.
- */
- if (unlikely(!READ_ONCE(all_cpu_data)))
- goto ack_intr;
-
- /*
- * The free is done during cleanup, when cpufreq registry is failed.
- * We wouldn't be here if it fails on init or switch status. But for
- * future changes, added check.
- */
- cpudata = READ_ONCE(all_cpu_data[this_cpu]);
- if (unlikely(!cpudata))
- goto ack_intr;
-
- schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+ schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
+ msecs_to_jiffies(10));
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
return;
ack_intr:
- wrmsrl_safe(MSR_HWP_STATUS, 0);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ wrmsrq_safe(MSR_HWP_STATUS, 0);
+ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
- unsigned long flags;
+ bool cancel_work;
- if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
return;
- /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- spin_lock_irqsave(&hwp_notify_lock, flags);
- if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
- cancel_delayed_work(&cpudata->hwp_notify_work);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ raw_spin_lock_irq(&hwp_notify_lock);
+ cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+ raw_spin_unlock_irq(&hwp_notify_lock);
+
+ if (cancel_work)
+ cancel_delayed_work_sync(&cpudata->hwp_notify_work);
}
+#define HWP_GUARANTEED_PERF_CHANGE_REQ BIT(0)
+#define HWP_HIGHEST_PERF_CHANGE_REQ BIT(2)
+
static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
- /* Enable HWP notification interrupt for guaranteed performance change */
+ /* Enable HWP notification interrupt for performance change */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
- unsigned long flags;
+ u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
- spin_lock_irqsave(&hwp_notify_lock, flags);
+ raw_spin_lock_irq(&hwp_notify_lock);
INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
- spin_unlock_irqrestore(&hwp_notify_lock, flags);
+ raw_spin_unlock_irq(&hwp_notify_lock);
+
+ if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
+ interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
- /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
}
@@ -1696,13 +2006,6 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
/*
- * If this CPU gen doesn't call for change in balance_perf
- * EPP return.
- */
- if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
- return;
-
- /*
* If the EPP is set by firmware, which means that firmware enabled HWP
* - Is equal or less than 0x80 (default balance_perf EPP)
* - But less performance oriented than performance EPP
@@ -1715,6 +2018,13 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
}
/*
+ * If this CPU generation doesn't call for a change in the
+ * balance_perf EPP, return.
+ */
+ if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
+ return;
+
+ /*
* Use hard coded value per gen to update the balance_perf
* and default EPP.
*/
@@ -1726,9 +2036,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt till we activate again */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
intel_pstate_enable_hwp_interrupt(cpudata);
@@ -1738,11 +2048,23 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
intel_pstate_update_epp_defaults(cpudata);
}
+static u64 get_perf_ctl_val(int pstate)
+{
+ u64 val;
+
+ val = (u64)pstate << 8;
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+ cpu_feature_enabled(X86_FEATURE_IDA))
+ val |= (u64)1 << 32;
+
+ return val;
+}
+
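
get_perf_ctl_val() above centralizes the PERF_CTL encoding that atom_get_val() and core_get_val() previously duplicated: the target ratio goes into bits 15:8, and bit 32 (turbo disengage) is set only when turbo is administratively off yet still available in hardware and the CPU reports IDA. A worked encoding:

	/*
	 * pstate = 24 (0x18):                 val = 0x18 << 8   = 0x1800
	 * + no_turbo && !turbo_disabled && X86_FEATURE_IDA:
	 *                                     val |= 1ULL << 32 -> 0x100001800
	 */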
static int atom_get_min_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_RATIOS, value);
return (value >> 8) & 0x7F;
}
@@ -1750,7 +2072,7 @@ static int atom_get_max_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_RATIOS, value);
return (value >> 16) & 0x7F;
}
@@ -1758,20 +2080,16 @@ static int atom_get_turbo_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_TURBO_RATIOS, value);
return value & 0x7F;
}
static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
- u64 val;
+ u64 val = get_perf_ctl_val(pstate);
int32_t vid_fp;
u32 vid;
- val = (u64)pstate << 8;
- if (global.no_turbo && !global.turbo_disabled)
- val |= (u64)1 << 32;
-
vid_fp = cpudata->vid.min + mul_fp(
int_tofp(pstate - cpudata->pstate.min_pstate),
cpudata->vid.ratio);
@@ -1793,7 +2111,7 @@ static int silvermont_get_scaling(void)
static int silvermont_freq_table[] = {
83300, 100000, 133300, 116700, 80000};
- rdmsrl(MSR_FSB_FREQ, value);
+ rdmsrq(MSR_FSB_FREQ, value);
i = value & 0x7;
WARN_ON(i > 4);
@@ -1809,7 +2127,7 @@ static int airmont_get_scaling(void)
83300, 100000, 133300, 116700, 80000,
93300, 90000, 88900, 87500};
- rdmsrl(MSR_FSB_FREQ, value);
+ rdmsrq(MSR_FSB_FREQ, value);
i = value & 0xF;
WARN_ON(i > 8);
@@ -1820,7 +2138,7 @@ static void atom_get_vid(struct cpudata *cpudata)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_VIDS, value);
+ rdmsrq(MSR_ATOM_CORE_VIDS, value);
cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
cpudata->vid.ratio = div_fp(
@@ -1828,7 +2146,7 @@ static void atom_get_vid(struct cpudata *cpudata)
int_tofp(cpudata->pstate.max_pstate -
cpudata->pstate.min_pstate));
- rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
+ rdmsrq(MSR_ATOM_CORE_TURBO_VIDS, value);
cpudata->vid.turbo = value & 0x7f;
}
@@ -1836,7 +2154,7 @@ static int core_get_min_pstate(int cpu)
{
u64 value;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 40) & 0xFF;
}
@@ -1844,7 +2162,7 @@ static int core_get_max_pstate_physical(int cpu)
{
u64 value;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 8) & 0xFF;
}
@@ -1858,13 +2176,13 @@ static int core_get_tdp_ratio(int cpu, u64 plat_info)
int err;
/* Get the TDP level (0, 1, 2) to get ratios */
- err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+ err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
if (err)
return err;
/* TDP MSR are continuous starting at 0x648 */
tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
- err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
+ err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
if (err)
return err;
@@ -1889,7 +2207,7 @@ static int core_get_max_pstate(int cpu)
int tdp_ratio;
int err;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
max_pstate = (plat_info >> 8) & 0xFF;
tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
@@ -1901,7 +2219,7 @@ static int core_get_max_pstate(int cpu)
return tdp_ratio;
}
- err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
+ err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
if (!err) {
int tar_levels;
@@ -1921,7 +2239,7 @@ static int core_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (value) & 255;
if (ret <= nont)
@@ -1931,13 +2249,7 @@ static int core_get_turbo_pstate(int cpu)
static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
- u64 val;
-
- val = (u64)pstate << 8;
- if (global.no_turbo && !global.turbo_disabled)
- val |= (u64)1 << 32;
-
- return val;
+ return get_perf_ctl_val(pstate);
}
static int knl_get_aperf_mperf_shift(void)
@@ -1950,7 +2262,7 @@ static int knl_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (((value) >> 8) & 0xFF);
if (ret <= nont)
@@ -1958,33 +2270,27 @@ static int knl_get_turbo_pstate(int cpu)
return ret;
}
-static void hybrid_get_type(void *data)
-{
- u8 *cpu_type = data;
-
- *cpu_type = get_this_hybrid_cpu_type();
-}
-
static int hwp_get_cpu_scaling(int cpu)
{
- u8 cpu_type = 0;
+ if (hybrid_scaling_factor) {
+ /*
+ * Return the hybrid scaling factor for P-cores and use the
+ * default core scaling for E-cores.
+ */
+ if (hybrid_get_cpu_type(cpu) == INTEL_CPU_TYPE_CORE)
+ return hybrid_scaling_factor;
- smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
- /* P-cores have a smaller perf level-to-frequency scaling factor. */
- if (cpu_type == 0x40)
- return HYBRID_SCALING_FACTOR;
+ return core_get_scaling();
+ }
- /* Use default core scaling for E-cores */
- if (cpu_type == 0x20)
+ /* Use core scaling on non-hybrid systems. */
+ if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
return core_get_scaling();
/*
- * If reached here, this system is either non-hybrid (like Tiger
- * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with
- * no E cores (in which case CPUID for hybrid support is 0).
- *
- * The CPPC nominal_frequency field is 0 for non-hybrid systems,
- * so the default core scaling will be used for them.
+ * The system is hybrid, but the hybrid scaling factor is not known or
+ * the CPU type is not one of the above, so use CPPC to compute the
+ * scaling factor for this CPU.
*/
return intel_pstate_cppc_get_scaling(cpu);
}
@@ -1998,7 +2304,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
* the CPU being updated, so force the register update to run on the
* right CPU.
*/
- wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, pstate));
}
@@ -2007,21 +2313,12 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
-static void intel_pstate_max_within_limits(struct cpudata *cpu)
-{
- int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
-
- update_turbo_state();
- intel_pstate_set_pstate(cpu, pstate);
-}
-
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
- int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
int perf_ctl_scaling = pstate_funcs.get_scaling();
+ cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(cpu->cpu);
cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
- cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
if (hwp_active && !hwp_mode_bdw) {
@@ -2029,11 +2326,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (pstate_funcs.get_cpu_scaling) {
cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
- if (cpu->pstate.scaling != perf_ctl_scaling)
- intel_pstate_hybrid_hwp_adjust(cpu);
+ intel_pstate_hybrid_hwp_adjust(cpu);
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
+ /*
+ * If the CPU is going online for the first time and it was
+ * offline initially, asym capacity scaling needs to be updated.
+ */
+ hybrid_update_capacity(cpu);
} else {
cpu->pstate.scaling = perf_ctl_scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
@@ -2106,7 +2407,7 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
return;
hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
- wrmsrl(MSR_HWP_REQUEST, hwp_req);
+ wrmsrq(MSR_HWP_REQUEST, hwp_req);
cpu->last_update = cpu->sample.time;
}
@@ -2119,7 +2420,7 @@ static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
expired = time_after64(cpu->sample.time, cpu->last_update +
hwp_boost_hold_time_ns);
if (expired) {
- wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
+ wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached);
cpu->hwp_boost_min = 0;
}
}
@@ -2180,8 +2481,8 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
u64 tsc;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
local_irq_restore(flags);
@@ -2208,7 +2509,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
- if (cpu->last_sample_time) {
+ if (likely(cpu->last_sample_time)) {
intel_pstate_calc_avg_perf(cpu);
return true;
}
@@ -2240,7 +2541,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
sample->busy_scaled = busy_frac * 100;
- target = global.no_turbo || global.turbo_disabled ?
+ target = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
target += target >> 2;
target = mul_fp(target, busy_frac);
@@ -2275,7 +2576,7 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
return;
cpu->pstate.current_pstate = pstate;
- wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+ wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
static void intel_pstate_adjust_pstate(struct cpudata *cpu)
@@ -2284,8 +2585,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu)
struct sample *sample;
int target_pstate;
- update_turbo_state();
-
target_pstate = get_target_pstate(cpu);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
@@ -2380,51 +2679,62 @@ static const struct pstate_funcs knl_funcs = {
.get_val = core_get_val,
};
-#define X86_MATCH(model, policy) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
- X86_FEATURE_APERFMPERF, &policy)
+#define X86_MATCH(vfm, policy) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, &policy)
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
- X86_MATCH(SANDYBRIDGE, core_funcs),
- X86_MATCH(SANDYBRIDGE_X, core_funcs),
- X86_MATCH(ATOM_SILVERMONT, silvermont_funcs),
- X86_MATCH(IVYBRIDGE, core_funcs),
- X86_MATCH(HASWELL, core_funcs),
- X86_MATCH(BROADWELL, core_funcs),
- X86_MATCH(IVYBRIDGE_X, core_funcs),
- X86_MATCH(HASWELL_X, core_funcs),
- X86_MATCH(HASWELL_L, core_funcs),
- X86_MATCH(HASWELL_G, core_funcs),
- X86_MATCH(BROADWELL_G, core_funcs),
- X86_MATCH(ATOM_AIRMONT, airmont_funcs),
- X86_MATCH(SKYLAKE_L, core_funcs),
- X86_MATCH(BROADWELL_X, core_funcs),
- X86_MATCH(SKYLAKE, core_funcs),
- X86_MATCH(BROADWELL_D, core_funcs),
- X86_MATCH(XEON_PHI_KNL, knl_funcs),
- X86_MATCH(XEON_PHI_KNM, knl_funcs),
- X86_MATCH(ATOM_GOLDMONT, core_funcs),
- X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
- X86_MATCH(SKYLAKE_X, core_funcs),
- X86_MATCH(COMETLAKE, core_funcs),
- X86_MATCH(ICELAKE_X, core_funcs),
- X86_MATCH(TIGERLAKE, core_funcs),
- X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_SANDYBRIDGE, core_funcs),
+ X86_MATCH(INTEL_SANDYBRIDGE_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_SILVERMONT, silvermont_funcs),
+ X86_MATCH(INTEL_IVYBRIDGE, core_funcs),
+ X86_MATCH(INTEL_HASWELL, core_funcs),
+ X86_MATCH(INTEL_BROADWELL, core_funcs),
+ X86_MATCH(INTEL_IVYBRIDGE_X, core_funcs),
+ X86_MATCH(INTEL_HASWELL_X, core_funcs),
+ X86_MATCH(INTEL_HASWELL_L, core_funcs),
+ X86_MATCH(INTEL_HASWELL_G, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_G, core_funcs),
+ X86_MATCH(INTEL_ATOM_AIRMONT, airmont_funcs),
+ X86_MATCH(INTEL_SKYLAKE_L, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_X, core_funcs),
+ X86_MATCH(INTEL_SKYLAKE, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_D, core_funcs),
+ X86_MATCH(INTEL_XEON_PHI_KNL, knl_funcs),
+ X86_MATCH(INTEL_XEON_PHI_KNM, knl_funcs),
+ X86_MATCH(INTEL_ATOM_GOLDMONT, core_funcs),
+ X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS, core_funcs),
+ X86_MATCH(INTEL_SKYLAKE_X, core_funcs),
+ X86_MATCH(INTEL_COMETLAKE, core_funcs),
+ X86_MATCH(INTEL_ICELAKE_X, core_funcs),
+ X86_MATCH(INTEL_TIGERLAKE, core_funcs),
+ X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_D, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+#ifdef CONFIG_ACPI
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
- X86_MATCH(BROADWELL_D, core_funcs),
- X86_MATCH(BROADWELL_X, core_funcs),
- X86_MATCH(SKYLAKE_X, core_funcs),
- X86_MATCH(ICELAKE_X, core_funcs),
- X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_D, core_funcs),
+ X86_MATCH(INTEL_BROADWELL_X, core_funcs),
+ X86_MATCH(INTEL_SKYLAKE_X, core_funcs),
+ X86_MATCH(INTEL_ICELAKE_X, core_funcs),
+ X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_D, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
+ X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
+ X86_MATCH(INTEL_DIAMONDRAPIDS_X, core_funcs),
{}
};
+#endif
static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
- X86_MATCH(KABYLAKE, core_funcs),
+ X86_MATCH(INTEL_KABYLAKE, core_funcs),
{}
};
@@ -2461,7 +2771,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
}
cpu->epp_powersave = -EINVAL;
- cpu->epp_policy = 0;
+ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
intel_pstate_get_cpu_pstates(cpu);
@@ -2503,7 +2813,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
- return global.turbo_disabled || global.no_turbo ?
+ return READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}
@@ -2528,13 +2838,12 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
* abstract values to represent performance rather than pure ratios.
*/
if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
- int scaling = cpu->pstate.scaling;
int freq;
freq = max_policy_perf * perf_ctl_scaling;
- max_policy_perf = DIV_ROUND_UP(freq, scaling);
+ max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
freq = min_policy_perf * perf_ctl_scaling;
- min_policy_perf = DIV_ROUND_UP(freq, scaling);
+ min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
}
pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -2589,12 +2898,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
+
/*
* NOHZ_FULL CPUs need this as the governor callback may not
* be invoked on them.
*/
intel_pstate_clear_update_util_hook(policy->cpu);
- intel_pstate_max_within_limits(cpu);
+ intel_pstate_set_pstate(cpu, pstate);
} else {
intel_pstate_set_update_util_hook(policy->cpu);
}
@@ -2609,6 +2920,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_clear_update_util_hook(policy->cpu);
intel_pstate_hwp_set(policy->cpu);
}
+ /*
+ * policy->cur is never updated by the intel_pstate driver, but it
+ * may still be read as a (stale) frequency value, so keep it within
+ * the limits.
+ */
+ policy->cur = policy->min;
mutex_unlock(&intel_pstate_limits_lock);
@@ -2632,10 +2948,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
{
int max_freq;
- update_turbo_state();
if (hwp_active) {
intel_pstate_get_hwp_cap(cpu);
- max_freq = global.no_turbo || global.turbo_disabled ?
+ max_freq = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
} else {
max_freq = intel_pstate_get_max_freq(cpu);
@@ -2692,6 +3007,8 @@ static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
*/
intel_pstate_hwp_reenable(cpu);
cpu->suspended = false;
+
+ hybrid_update_capacity(cpu);
}
return 0;
@@ -2704,13 +3021,11 @@ static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
return intel_cpufreq_cpu_offline(policy);
}
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
pr_debug("CPU %d exiting\n", policy->cpu);
policy->fast_switch_possible = false;
-
- return 0;
}
static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -2729,9 +3044,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_freq;
- update_turbo_state();
- global.turbo_disabled_mf = global.turbo_disabled;
- policy->cpuinfo.max_freq = global.turbo_disabled ?
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
policy->min = policy->cpuinfo.min_freq;
@@ -2847,19 +3160,19 @@ static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
WRITE_ONCE(cpu->hwp_req_cached, value);
if (fast_switch)
- wrmsrl(MSR_HWP_REQUEST, value);
+ wrmsrq(MSR_HWP_REQUEST, value);
else
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
u32 target_pstate, bool fast_switch)
{
if (fast_switch)
- wrmsrl(MSR_IA32_PERF_CTL,
+ wrmsrq(MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
else
- wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
@@ -2874,8 +3187,8 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
int max_pstate = policy->strict_target ?
target_pstate : cpu->max_perf_ratio;
- intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
- fast_switch);
+ intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate,
+ target_pstate, fast_switch);
} else if (target_pstate != old_pstate) {
intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
}
@@ -2896,25 +3209,12 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
int target_pstate;
- update_turbo_state();
-
freqs.old = policy->cur;
freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
- switch (relation) {
- case CPUFREQ_RELATION_L:
- target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
- break;
- case CPUFREQ_RELATION_H:
- target_pstate = freqs.new / cpu->pstate.scaling;
- break;
- default:
- target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
- break;
- }
-
+ target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
freqs.new = target_pstate * cpu->pstate.scaling;
@@ -2930,9 +3230,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
struct cpudata *cpu = all_cpu_data[policy->cpu];
int target_pstate;
- update_turbo_state();
-
- target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+ target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
@@ -2949,9 +3247,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
int old_pstate = cpu->pstate.current_pstate;
int cap_pstate, min_pstate, max_pstate, target_pstate;
- update_turbo_state();
- cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
- HWP_HIGHEST_PERF(hwp_cap);
+ cap_pstate = READ_ONCE(global.no_turbo) ?
+ HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
/* Optimization: Avoid unnecessary divisions. */
@@ -2969,6 +3267,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
if (min_pstate < cpu->min_perf_ratio)
min_pstate = cpu->min_perf_ratio;
+ if (min_pstate > cpu->max_perf_ratio)
+ min_pstate = cpu->max_perf_ratio;
+
max_pstate = min(cap_pstate, cpu->max_perf_ratio);
if (max_pstate < min_pstate)
max_pstate = min_pstate;
@@ -3015,7 +3316,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
intel_pstate_get_hwp_cap(cpu);
- rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+ rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
cpu->epp_cached = intel_pstate_get_epp(cpu, value);
@@ -3055,7 +3356,7 @@ pstate_exit:
return ret;
}
-static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct freq_qos_request *req;
@@ -3065,7 +3366,7 @@ static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
freq_qos_remove_request(req);
kfree(req);
- return intel_pstate_cpu_exit(policy);
+ intel_pstate_cpu_exit(policy);
}
static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
@@ -3082,7 +3383,7 @@ static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
* written by it may not be suitable.
*/
value &= ~HWP_DESIRED_PERF(~0L);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
WRITE_ONCE(cpu->hwp_req_cached, value);
}
@@ -3116,10 +3417,8 @@ static void intel_pstate_driver_cleanup(void)
if (intel_pstate_driver == &intel_pstate)
intel_pstate_clear_update_util_hook(cpu);
- spin_lock(&hwp_notify_lock);
kfree(all_cpu_data[cpu]);
WRITE_ONCE(all_cpu_data[cpu], NULL);
- spin_unlock(&hwp_notify_lock);
}
}
cpus_read_unlock();
@@ -3129,6 +3428,7 @@ static void intel_pstate_driver_cleanup(void)
static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
+ bool refresh_cpu_cap_scaling;
int ret;
if (driver == &intel_pstate)
@@ -3136,6 +3436,12 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
memset(&global, 0, sizeof(global));
global.max_perf_pct = 100;
+ global.turbo_disabled = turbo_is_disabled();
+ global.no_turbo = global.turbo_disabled;
+
+ arch_set_max_freq_ratio(global.turbo_disabled);
+
+ refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu();
intel_pstate_driver = driver;
ret = cpufreq_register_driver(intel_pstate_driver);
@@ -3146,6 +3452,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
global.min_perf_pct = min_perf_pct_min();
+ hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling);
+
return 0;
}
@@ -3325,7 +3633,7 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
- rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+ rdmsrq(MSR_MISC_PWR_MGMT, misc_pwr);
if (misc_pwr & BITMASK_OOB) {
pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
@@ -3367,14 +3675,13 @@ static inline void intel_pstate_request_control_from_smm(void) {}
#define INTEL_PSTATE_HWP_BROADWELL 0x01
-#define X86_MATCH_HWP(model, hwp_mode) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
- X86_FEATURE_HWP, hwp_mode)
+#define X86_MATCH_HWP(vfm, hwp_mode) \
+ X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_HWP, hwp_mode)
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
- X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
- X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
- X86_MATCH_HWP(ANY, 0),
+ X86_MATCH_HWP(INTEL_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+ X86_MATCH_HWP(INTEL_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
+ X86_MATCH_HWP(INTEL_ANY, 0),
{}
};
@@ -3382,21 +3689,74 @@ static bool intel_pstate_hwp_is_enabled(void)
{
u64 value;
- rdmsrl(MSR_PM_ENABLE, value);
+ rdmsrq(MSR_PM_ENABLE, value);
return !!(value & 0x1);
}
-static const struct x86_cpu_id intel_epp_balance_perf[] = {
+#define POWERSAVE_MASK GENMASK(7, 0)
+#define BALANCE_POWER_MASK GENMASK(15, 8)
+#define BALANCE_PERFORMANCE_MASK GENMASK(23, 16)
+#define PERFORMANCE_MASK GENMASK(31, 24)
+
+#define HWP_SET_EPP_VALUES(powersave, balance_power, balance_perf, performance) \
+ (FIELD_PREP_CONST(POWERSAVE_MASK, powersave) |\
+ FIELD_PREP_CONST(BALANCE_POWER_MASK, balance_power) |\
+ FIELD_PREP_CONST(BALANCE_PERFORMANCE_MASK, balance_perf) |\
+ FIELD_PREP_CONST(PERFORMANCE_MASK, performance))
+
+#define HWP_SET_DEF_BALANCE_PERF_EPP(balance_perf) \
+ (HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, HWP_EPP_BALANCE_POWERSAVE,\
+ balance_perf, HWP_EPP_PERFORMANCE))
+
+static const struct x86_cpu_id intel_epp_default[] = {
/*
* Set the EPP value to 102, the highest suggested EPP that can
* still result in one-core turbo frequency on Alder Lake mobile
* CPUs.
*/
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
- X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+ 179, 64, 16)),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
+ 179, 64, 16)),
{}
};
+static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
+ X86_MATCH_VFM(INTEL_ALDERLAKE, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
+ {}
+};
+
+static bool hwp_check_epp(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
+ return true;
+
+ /* Without EPP support, don't expose EPP-related sysfs attributes. */
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_PREFERENCE_INDEX] = NULL;
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] = NULL;
+
+ return false;
+}
+
+static bool hwp_check_dec(void)
+{
+ u64 power_ctl;
+
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
+ return !!(power_ctl & BIT(POWER_CTL_DEC_ENABLE));
+}
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3406,26 +3766,44 @@ static int __init intel_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists()) {
+ pr_info("P-states controlled by the platform\n");
+ return -ENODEV;
+ }
+
id = x86_match_cpu(hwp_support_ids);
if (id) {
- hwp_forced = intel_pstate_hwp_is_enabled();
+ bool epp_present = hwp_check_epp();
- if (hwp_forced)
+ /*
+ * If HWP is enabled already, there is no choice but to deal
+ * with it.
+ */
+ hwp_forced = intel_pstate_hwp_is_enabled();
+ if (hwp_forced) {
pr_info("HWP enabled by BIOS\n");
- else if (no_load)
+ no_hwp = 0;
+ } else if (no_load) {
return -ENODEV;
+ } else if (!epp_present && !hwp_check_dec()) {
+ /*
+ * Avoid enabling HWP for processors without EPP support
+ * unless the Dynamic Efficiency Control (DEC) enable bit
+ * (MSR_IA32_POWER_CTL, bit 27) is set, because otherwise it
+ * means an incomplete HWP implementation, which is a corner
+ * case that is generally problematic to support.
+ */
+ no_hwp = 1;
+ }
copy_cpu_funcs(&core_funcs);
- /*
- * Avoid enabling HWP for processors without EPP support,
- * because that means incomplete HWP implementation which is a
- * corner case and supporting it is generally problematic.
- *
- * If HWP is enabled already, though, there is no choice but to
- * deal with it.
- */
- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
- WRITE_ONCE(hwp_active, 1);
+
+ if (!no_hwp) {
+ hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
@@ -3461,15 +3839,6 @@ static int __init intel_pstate_init(void)
default_driver = &intel_cpufreq;
hwp_cpu_matched:
- /*
- * The Intel pstate driver will be ignored if the platform
- * firmware has its own power management modes.
- */
- if (intel_pstate_platform_pwr_mgmt_exists()) {
- pr_info("P-states controlled by the platform\n");
- return -ENODEV;
- }
-
if (!hwp_active && hwp_only)
return -ENOTSUPP;
@@ -3486,15 +3855,35 @@ hwp_cpu_matched:
intel_pstate_sysfs_expose_params();
if (hwp_active) {
- const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+ const struct x86_cpu_id *id = x86_match_cpu(intel_epp_default);
+ const struct x86_cpu_id *hybrid_id = x86_match_cpu(intel_hybrid_scaling_factor);
+
+ if (id) {
+ epp_values[EPP_INDEX_POWERSAVE] =
+ FIELD_GET(POWERSAVE_MASK, id->driver_data);
+ epp_values[EPP_INDEX_BALANCE_POWERSAVE] =
+ FIELD_GET(BALANCE_POWER_MASK, id->driver_data);
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE] =
+ FIELD_GET(BALANCE_PERFORMANCE_MASK, id->driver_data);
+ epp_values[EPP_INDEX_PERFORMANCE] =
+ FIELD_GET(PERFORMANCE_MASK, id->driver_data);
+ pr_debug("Updated EPPs powersave:%x balanced power:%x balanced perf:%x performance:%x\n",
+ epp_values[EPP_INDEX_POWERSAVE],
+ epp_values[EPP_INDEX_BALANCE_POWERSAVE],
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE],
+ epp_values[EPP_INDEX_PERFORMANCE]);
+ }
+
+ if (hybrid_id) {
+ hybrid_scaling_factor = hybrid_id->driver_data;
+ pr_debug("hybrid scaling factor: %d\n", hybrid_scaling_factor);
+ }
- if (id)
- epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
}
- mutex_lock(&intel_pstate_driver_lock);
- rc = intel_pstate_register_driver(default_driver);
- mutex_unlock(&intel_pstate_driver_lock);
+ scoped_guard(mutex, &intel_pstate_driver_lock) {
+ rc = intel_pstate_register_driver(default_driver);
+ }
if (rc) {
intel_pstate_sysfs_remove();
return rc;
@@ -3533,6 +3922,9 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "no_hwp"))
no_hwp = 1;
+ if (!strcmp(str, "no_cas"))
+ no_cas = true;
+
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 95588101efbd..24b285cbeb8d 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -96,7 +96,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
.target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
.name = "kirkwood-cpufreq",
- .attr = cpufreq_generic_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
@@ -178,15 +177,13 @@ out_node:
return err;
}
-static int kirkwood_cpufreq_remove(struct platform_device *pdev)
+static void kirkwood_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&kirkwood_cpufreq_driver);
clk_disable_unprepare(priv.powersave_clk);
clk_disable_unprepare(priv.ddr_clk);
clk_disable_unprepare(priv.cpu_clk);
-
- return 0;
}
static struct platform_driver kirkwood_cpufreq_platform_driver = {
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 4c57c6725c13..49e76b44468a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -136,7 +136,7 @@ static void do_longhaul1(unsigned int mults_index)
{
union msr_bcr2 bcr2;
- rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ rdmsrq(MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
bcr2.bits.CLOCKMUL = mults_index & 0xff;
@@ -144,16 +144,16 @@ static void do_longhaul1(unsigned int mults_index)
/* Sync to timer tick */
safe_halt();
/* Change frequency on next halt or sleep */
- wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ wrmsrq(MSR_VIA_BCR2, bcr2.val);
/* Invoke transition */
ACPI_FLUSH_CPU_CACHE();
halt();
/* Disable software clock multiplier */
local_irq_disable();
- rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ rdmsrq(MSR_VIA_BCR2, bcr2.val);
bcr2.bits.ESOFTBF = 0;
- wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ wrmsrq(MSR_VIA_BCR2, bcr2.val);
}
/* For processor with Longhaul MSR */
@@ -164,7 +164,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
union msr_longhaul longhaul;
u32 t;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Setup new frequency */
if (!revid_errata)
longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
@@ -180,7 +180,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
/* Raise voltage if necessary */
if (can_scale_voltage && dir) {
longhaul.bits.EnableSoftVID = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
@@ -194,12 +194,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
}
/* Change frequency on next halt or sleep */
longhaul.bits.EnableSoftBusRatio = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
halt();
@@ -212,12 +212,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
}
/* Disable bus ratio bit */
longhaul.bits.EnableSoftBusRatio = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Reduce voltage if necessary */
if (can_scale_voltage && !dir) {
longhaul.bits.EnableSoftVID = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
@@ -231,13 +231,14 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
}
}
/**
- * longhaul_set_cpu_frequency()
- * @mults_index : bitpattern of the new multiplier.
+ * longhaul_setstate()
+ * @policy: cpufreq_policy structure containing the current policy.
+ * @table_index: index of the frequency within the cpufreq_frequency_table.
*
* Sets a new clock ratio.
*/
@@ -533,7 +534,7 @@ static void longhaul_setup_voltagescaling(void)
unsigned int j, speed, pos, kHz_step, numvscales;
int min_vid_speed;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
if (!(longhaul.bits.RevisionID & 1)) {
pr_info("Voltage scaling not supported by CPU\n");
return;
@@ -905,7 +906,6 @@ static struct cpufreq_driver longhaul_driver = {
.get = longhaul_get,
.init = longhaul_cpu_init,
.name = "longhaul",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id longhaul_id[] = {
@@ -953,6 +953,9 @@ static void __exit longhaul_exit(void)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
int i;
+ if (unlikely(!policy))
+ return;
+
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
struct cpufreq_freqs freqs;
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index afc59b292153..39a6c4315a60 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -85,19 +85,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
static struct cpufreq_driver loongson2_cpufreq_driver = {
.name = "loongson2",
.init = loongson2_cpufreq_cpu_init,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = loongson2_cpufreq_target,
.get = cpufreq_generic_get,
- .exit = loongson2_cpufreq_exit,
- .attr = cpufreq_generic_attr,
};
static const struct platform_device_id platform_device_ids[] = {
@@ -154,7 +147,9 @@ static int __init cpufreq_init(void)
ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
- if (!ret && !nowait) {
+ if (ret) {
+ platform_driver_unregister(&platform_driver);
+ } else if (!nowait) {
saved_cpu_wait = cpu_wait;
cpu_wait = loongson2_cpu_wait;
}
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
new file mode 100644
index 000000000000..1e8715ea1b77
--- /dev/null
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPUFreq driver for the Loongson-3 processors.
+ *
+ * All revisions of the Loongson-3 processor support the
+ * cpu_has_scalefreq feature.
+ *
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2024 Loongson Technology Corporation Limited
+ */
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/units.h>
+
+#include <asm/idle.h>
+#include <asm/loongarch.h>
+#include <asm/loongson.h>
+
+/* Message */
+union smc_message {
+ u32 value;
+ struct {
+ u32 id : 4;
+ u32 info : 4;
+ u32 val : 16;
+ u32 cmd : 6;
+ u32 extra : 1;
+ u32 complete : 1;
+ };
+};
+
+/* Command return values */
+#define CMD_OK 0 /* No error */
+#define CMD_ERROR 1 /* Regular error */
+#define CMD_NOCMD 2 /* Command not supported */
+#define CMD_INVAL 3 /* Invalid Parameter */
+
+/* Version commands */
+/*
+ * CMD_GET_VERSION - Get interface version
+ * Input: none
+ * Output: version
+ */
+#define CMD_GET_VERSION 0x1
+
+/* Feature commands */
+/*
+ * CMD_GET_FEATURE - Get feature state
+ * Input: feature ID
+ * Output: feature flag
+ */
+#define CMD_GET_FEATURE 0x2
+
+/*
+ * CMD_SET_FEATURE - Set feature state
+ * Input: feature ID, feature flag
+ * Output: none
+ */
+#define CMD_SET_FEATURE 0x3
+
+/* Feature IDs */
+#define FEATURE_SENSOR 0
+#define FEATURE_FAN 1
+#define FEATURE_DVFS 2
+
+/* Sensor feature flags */
+#define FEATURE_SENSOR_ENABLE BIT(0)
+#define FEATURE_SENSOR_SAMPLE BIT(1)
+
+/* Fan feature flags */
+#define FEATURE_FAN_ENABLE BIT(0)
+#define FEATURE_FAN_AUTO BIT(1)
+
+/* DVFS feature flags */
+#define FEATURE_DVFS_ENABLE BIT(0)
+#define FEATURE_DVFS_BOOST BIT(1)
+#define FEATURE_DVFS_AUTO BIT(2)
+#define FEATURE_DVFS_SINGLE_BOOST BIT(3)
+
+/* Sensor commands */
+/*
+ * CMD_GET_SENSOR_NUM - Get number of sensors
+ * Input: none
+ * Output: number
+ */
+#define CMD_GET_SENSOR_NUM 0x4
+
+/*
+ * CMD_GET_SENSOR_STATUS - Get sensor status
+ * Input: sensor ID, type
+ * Output: sensor status
+ */
+#define CMD_GET_SENSOR_STATUS 0x5
+
+/* Sensor types */
+#define SENSOR_INFO_TYPE 0
+#define SENSOR_INFO_TYPE_TEMP 1
+
+/* Fan commands */
+/*
+ * CMD_GET_FAN_NUM - Get number of fans
+ * Input: none
+ * Output: number
+ */
+#define CMD_GET_FAN_NUM 0x6
+
+/*
+ * CMD_GET_FAN_INFO - Get fan status
+ * Input: fan ID, type
+ * Output: fan info
+ */
+#define CMD_GET_FAN_INFO 0x7
+
+/*
+ * CMD_SET_FAN_INFO - Set fan status
+ * Input: fan ID, type, value
+ * Output: none
+ */
+#define CMD_SET_FAN_INFO 0x8
+
+/* Fan types */
+#define FAN_INFO_TYPE_LEVEL 0
+
+/* DVFS commands */
+/*
+ * CMD_GET_FREQ_LEVEL_NUM - Get number of freq levels
+ * Input: CPU ID
+ * Output: number
+ */
+#define CMD_GET_FREQ_LEVEL_NUM 0x9
+
+/*
+ * CMD_GET_FREQ_BOOST_LEVEL - Get the first boost level
+ * Input: CPU ID
+ * Output: number
+ */
+#define CMD_GET_FREQ_BOOST_LEVEL 0x10
+
+/*
+ * CMD_GET_FREQ_LEVEL_INFO - Get freq level info
+ * Input: CPU ID, level ID
+ * Output: level info
+ */
+#define CMD_GET_FREQ_LEVEL_INFO 0x11
+
+/*
+ * CMD_GET_FREQ_INFO - Get freq info
+ * Input: CPU ID, type
+ * Output: freq info
+ */
+#define CMD_GET_FREQ_INFO 0x12
+
+/*
+ * CMD_SET_FREQ_INFO - Set freq info
+ * Input: CPU ID, type, value
+ * Output: none
+ */
+#define CMD_SET_FREQ_INFO 0x13
+
+/* Freq types */
+#define FREQ_INFO_TYPE_FREQ 0
+#define FREQ_INFO_TYPE_LEVEL 1
+
+#define FREQ_MAX_LEVEL 16
+
+struct loongson3_freq_data {
+ unsigned int def_freq_level;
+ struct cpufreq_frequency_table table[];
+};
+
+static struct mutex cpufreq_mutex[MAX_PACKAGES];
+static struct cpufreq_driver loongson3_cpufreq_driver;
+static DEFINE_PER_CPU(struct loongson3_freq_data *, freq_data);
+
+static inline int do_service_request(u32 id, u32 info, u32 cmd, u32 val, u32 extra)
+{
+ int retries;
+ unsigned int cpu = raw_smp_processor_id();
+ unsigned int package = cpu_data[cpu].package;
+ union smc_message msg, last;
+
+ mutex_lock(&cpufreq_mutex[package]);
+
+ last.value = iocsr_read32(LOONGARCH_IOCSR_SMCMBX);
+ if (!last.complete) {
+ mutex_unlock(&cpufreq_mutex[package]);
+ return -EPERM;
+ }
+
+ msg.id = id;
+ msg.info = info;
+ msg.cmd = cmd;
+ msg.val = val;
+ msg.extra = extra;
+ msg.complete = 0;
+
+ iocsr_write32(msg.value, LOONGARCH_IOCSR_SMCMBX);
+ iocsr_write32(iocsr_read32(LOONGARCH_IOCSR_MISC_FUNC) | IOCSR_MISC_FUNC_SOFT_INT,
+ LOONGARCH_IOCSR_MISC_FUNC);
+
+ for (retries = 0; retries < 10000; retries++) {
+ msg.value = iocsr_read32(LOONGARCH_IOCSR_SMCMBX);
+ if (msg.complete)
+ break;
+
+ usleep_range(8, 12);
+ }
+
+ if (!msg.complete || msg.cmd != CMD_OK) {
+ mutex_unlock(&cpufreq_mutex[package]);
+ return -EPERM;
+ }
+
+ mutex_unlock(&cpufreq_mutex[package]);
+
+ return msg.val;
+}
+
+static unsigned int loongson3_cpufreq_get(unsigned int cpu)
+{
+ int ret;
+
+ ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_INFO, 0, 0);
+
+ return ret * KILO;
+}
+
+static int loongson3_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ int ret;
+
+ ret = do_service_request(cpu_data[policy->cpu].core,
+ FREQ_INFO_TYPE_LEVEL, CMD_SET_FREQ_INFO, index, 0);
+
+ return (ret >= 0) ? 0 : ret;
+}
+
+static int configure_freq_table(int cpu)
+{
+ int i, ret, boost_level, max_level, freq_level;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ struct loongson3_freq_data *data;
+
+ if (per_cpu(freq_data, cpu))
+ return 0;
+
+ ret = do_service_request(cpu, 0, CMD_GET_FREQ_LEVEL_NUM, 0, 0);
+ if (ret < 0)
+ return ret;
+ max_level = ret;
+
+ ret = do_service_request(cpu, 0, CMD_GET_FREQ_BOOST_LEVEL, 0, 0);
+ if (ret < 0)
+ return ret;
+ boost_level = ret;
+
+ freq_level = min(max_level, FREQ_MAX_LEVEL);
+ data = devm_kzalloc(&pdev->dev, struct_size(data, table, freq_level + 1), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->def_freq_level = boost_level - 1;
+
+ for (i = 0; i < freq_level; i++) {
+ ret = do_service_request(cpu, FREQ_INFO_TYPE_FREQ, CMD_GET_FREQ_LEVEL_INFO, i, 0);
+ if (ret < 0) {
+ devm_kfree(&pdev->dev, data);
+ return ret;
+ }
+
+ data->table[i].frequency = ret * KILO;
+ data->table[i].flags = (i >= boost_level) ? CPUFREQ_BOOST_FREQ : 0;
+ }
+
+ data->table[freq_level].flags = 0;
+ data->table[freq_level].frequency = CPUFREQ_TABLE_END;
+
+ per_cpu(freq_data, cpu) = data;
+
+ return 0;
+}
+
+static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int i, ret, cpu = policy->cpu;
+
+ ret = configure_freq_table(cpu);
+ if (ret < 0)
+ return ret;
+
+ policy->cpuinfo.transition_latency = 10000;
+ policy->freq_table = per_cpu(freq_data, cpu)->table;
+ policy->suspend_freq = policy->freq_table[per_cpu(freq_data, cpu)->def_freq_level].frequency;
+ cpumask_copy(policy->cpus, topology_sibling_cpumask(cpu));
+
+ for_each_cpu(i, policy->cpus) {
+ if (i != cpu)
+ per_cpu(freq_data, i) = per_cpu(freq_data, cpu);
+ }
+
+ return 0;
+}
+
+static void loongson3_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ int cpu = policy->cpu;
+
+ loongson3_cpufreq_target(policy, per_cpu(freq_data, cpu)->def_freq_level);
+}
+
+static int loongson3_cpufreq_cpu_online(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static int loongson3_cpufreq_cpu_offline(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static struct cpufreq_driver loongson3_cpufreq_driver = {
+ .name = "loongson3",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = loongson3_cpufreq_cpu_init,
+ .exit = loongson3_cpufreq_cpu_exit,
+ .online = loongson3_cpufreq_cpu_online,
+ .offline = loongson3_cpufreq_cpu_offline,
+ .get = loongson3_cpufreq_get,
+ .target_index = loongson3_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .set_boost = cpufreq_boost_set_sw,
+ .suspend = cpufreq_generic_suspend,
+};
+
+static int loongson3_cpufreq_probe(struct platform_device *pdev)
+{
+ int i, ret;
+
+ for (i = 0; i < MAX_PACKAGES; i++) {
+ ret = devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = do_service_request(0, 0, CMD_GET_VERSION, 0, 0);
+ if (ret <= 0)
+ return -EPERM;
+
+ ret = do_service_request(FEATURE_DVFS, 0, CMD_SET_FEATURE,
+ FEATURE_DVFS_ENABLE | FEATURE_DVFS_BOOST, 0);
+ if (ret < 0)
+ return -EPERM;
+
+ loongson3_cpufreq_driver.driver_data = pdev;
+
+ ret = cpufreq_register_driver(&loongson3_cpufreq_driver);
+ if (ret)
+ return ret;
+
+ pr_info("cpufreq: Loongson-3 CPU frequency driver.\n");
+
+ return 0;
+}
+
+static void loongson3_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&loongson3_cpufreq_driver);
+}
+
+static struct platform_device_id cpufreq_id_table[] = {
+ { "loongson3_cpufreq", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, cpufreq_id_table);
+
+static struct platform_driver loongson3_platform_driver = {
+ .driver = {
+ .name = "loongson3_cpufreq",
+ },
+ .id_table = cpufreq_id_table,
+ .probe = loongson3_cpufreq_probe,
+ .remove = loongson3_cpufreq_remove,
+};
+module_platform_driver(loongson3_platform_driver);
+
+MODULE_AUTHOR("Huacai Chen <chenhuacai@loongson.cn>");
+MODULE_DESCRIPTION("CPUFreq driver for Loongson-3 processors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
deleted file mode 100644
index f9306410a07f..000000000000
--- a/drivers/cpufreq/maple-cpufreq.c
+++ /dev/null
@@ -1,241 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Dmitry Eremin-Solenikov
- * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
- * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
- *
- * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
- * that is iMac G5 and latest single CPU desktop.
- */
-
-#undef DEBUG
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-#include <linux/time.h>
-#include <linux/of.h>
-
-#define DBG(fmt...) pr_debug(fmt)
-
-/* see 970FX user manual */
-
-#define SCOM_PCR 0x0aa001 /* PCR scom addr */
-
-#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
-#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
-#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
-#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
-#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
-#define PCR_SPEED_SHIFT 17
-#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
-#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
-#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
-#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
-#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
-#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
-
-#define SCOM_PSR 0x408001 /* PSR scom addr */
-/* warning: PSR is a 64 bits register */
-#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
-#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
-#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
-#define PSR_CUR_SPEED_SHIFT (56)
-
-/*
- * The G5 only supports two frequencies (Quarter speed is not supported)
- */
-#define CPUFREQ_HIGH 0
-#define CPUFREQ_LOW 1
-
-static struct cpufreq_frequency_table maple_cpu_freqs[] = {
- {0, CPUFREQ_HIGH, 0},
- {0, CPUFREQ_LOW, 0},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-/* Power mode data is an array of the 32 bits PCR values to use for
- * the various frequencies, retrieved from the device-tree
- */
-static int maple_pmode_cur;
-
-static const u32 *maple_pmode_data;
-static int maple_pmode_max;
-
-/*
- * SCOM based frequency switching for 970FX rev3
- */
-static int maple_scom_switch_freq(int speed_mode)
-{
- unsigned long flags;
- int to;
-
- local_irq_save(flags);
-
- /* Clear PCR high */
- scom970_write(SCOM_PCR, 0);
- /* Clear PCR low */
- scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
- /* Set PCR low */
- scom970_write(SCOM_PCR, PCR_HILO_SELECT |
- maple_pmode_data[speed_mode]);
-
- /* Wait for completion */
- for (to = 0; to < 10; to++) {
- unsigned long psr = scom970_read(SCOM_PSR);
-
- if ((psr & PSR_CMD_RECEIVED) == 0 &&
- (((psr >> PSR_CUR_SPEED_SHIFT) ^
- (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
- == 0)
- break;
- if (psr & PSR_CMD_COMPLETED)
- break;
- udelay(100);
- }
-
- local_irq_restore(flags);
-
- maple_pmode_cur = speed_mode;
- ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul;
-
- return 0;
-}
-
-static int maple_scom_query_freq(void)
-{
- unsigned long psr = scom970_read(SCOM_PSR);
- int i;
-
- for (i = 0; i <= maple_pmode_max; i++)
- if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
- (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
- break;
- return i;
-}
-
-/*
- * Common interface to the cpufreq core
- */
-
-static int maple_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- return maple_scom_switch_freq(index);
-}
-
-static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
-{
- return maple_cpu_freqs[maple_pmode_cur].frequency;
-}
-
-static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
- return 0;
-}
-
-static struct cpufreq_driver maple_cpufreq_driver = {
- .name = "maple",
- .flags = CPUFREQ_CONST_LOOPS,
- .init = maple_cpufreq_cpu_init,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = maple_cpufreq_target,
- .get = maple_cpufreq_get_speed,
- .attr = cpufreq_generic_attr,
-};
-
-static int __init maple_cpufreq_init(void)
-{
- struct device_node *cpunode;
- unsigned int psize;
- unsigned long max_freq;
- const u32 *valp;
- u32 pvr_hi;
- int rc = -ENODEV;
-
- /*
- * Behave here like powermac driver which checks machine compatibility
- * to ease merging of two drivers in future.
- */
- if (!of_machine_is_compatible("Momentum,Maple") &&
- !of_machine_is_compatible("Momentum,Apache"))
- return 0;
-
- /* Get first CPU node */
- cpunode = of_cpu_device_node_get(0);
- if (cpunode == NULL) {
- pr_err("Can't find any CPU 0 node\n");
- goto bail_noprops;
- }
-
- /* Check 970FX for now */
- /* we actually don't care on which CPU to access PVR */
- pvr_hi = PVR_VER(mfspr(SPRN_PVR));
- if (pvr_hi != 0x3c && pvr_hi != 0x44) {
- pr_err("Unsupported CPU version (%x)\n", pvr_hi);
- goto bail_noprops;
- }
-
- /* Look for the powertune data in the device-tree */
- /*
- * On Maple this property is provided by PIBS in dual-processor config,
- * not provided by PIBS in CPU0 config and also not provided by SLOF,
- * so YMMV
- */
- maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
- if (!maple_pmode_data) {
- DBG("No power-mode-data !\n");
- goto bail_noprops;
- }
- maple_pmode_max = psize / sizeof(u32) - 1;
-
- /*
- * From what I see, clock-frequency is always the maximal frequency.
- * The current driver can not slew sysclk yet, so we really only deal
- * with powertune steps for now. We also only implement full freq and
- * half freq in this version. So far, I haven't yet seen a machine
- * supporting anything else.
- */
- valp = of_get_property(cpunode, "clock-frequency", NULL);
- if (!valp)
- goto bail_noprops;
- max_freq = (*valp)/1000;
- maple_cpu_freqs[0].frequency = max_freq;
- maple_cpu_freqs[1].frequency = max_freq/2;
-
- /* Force apply current frequency to make sure everything is in
- * sync (voltage is right for example). Firmware may leave us with
- * a strange setting ...
- */
- msleep(10);
- maple_pmode_cur = -1;
- maple_scom_switch_freq(maple_scom_query_freq());
-
- pr_info("Registering Maple CPU frequency driver\n");
- pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
- maple_cpu_freqs[1].frequency/1000,
- maple_cpu_freqs[0].frequency/1000,
- maple_cpu_freqs[maple_pmode_cur].frequency/1000);
-
- rc = cpufreq_register_driver(&maple_cpufreq_driver);
-
-bail_noprops:
- of_node_put(cpunode);
-
- return rc;
-}
-
-module_init(maple_cpufreq_init);
-
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index b22f5cc8a463..ae4500ab4891 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -10,8 +10,10 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#define LUT_MAX_ENTRIES 32U
@@ -22,6 +24,8 @@
#define POLL_USEC 1000
#define TIMEOUT_USEC 300000
+#define FDVFS_FDIV_HZ (26 * 1000)
+
enum {
REG_FREQ_LUT_TABLE,
REG_FREQ_ENABLE,
@@ -33,7 +37,14 @@ enum {
REG_ARRAY_SIZE,
};
-struct mtk_cpufreq_data {
+struct mtk_cpufreq_priv {
+ struct device *dev;
+ const struct mtk_cpufreq_variant *variant;
+ void __iomem *fdvfs;
+};
+
+struct mtk_cpufreq_domain {
+ struct mtk_cpufreq_priv *parent;
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
struct resource *res;
@@ -41,26 +52,57 @@ struct mtk_cpufreq_data {
int nr_opp;
};
-static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
- [REG_FREQ_LUT_TABLE] = 0x0,
- [REG_FREQ_ENABLE] = 0x84,
- [REG_FREQ_PERF_STATE] = 0x88,
- [REG_FREQ_HW_STATE] = 0x8c,
- [REG_EM_POWER_TBL] = 0x90,
- [REG_FREQ_LATENCY] = 0x110,
+struct mtk_cpufreq_variant {
+ int (*init)(struct mtk_cpufreq_priv *priv);
+ const u16 reg_offsets[REG_ARRAY_SIZE];
+ const bool is_hybrid_dvfs;
+};
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_base_variant = {
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+ },
+};
+
+static int mtk_cpufreq_hw_mt8196_init(struct mtk_cpufreq_priv *priv)
+{
+ priv->fdvfs = devm_of_iomap(priv->dev, priv->dev->of_node, 0, NULL);
+ if (IS_ERR(priv->fdvfs))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->fdvfs),
+ "failed to get fdvfs iomem\n");
+
+ return 0;
+}
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_mt8196_variant = {
+ .init = mtk_cpufreq_hw_mt8196_init,
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x114,
+ },
+ .is_hybrid_dvfs = true,
};
static int __maybe_unused
mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
unsigned long *KHz)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
int i;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
if (!policy)
- return 0;
+ return -EINVAL;
data = policy->driver_data;
@@ -78,19 +120,38 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
return 0;
}
+static void mtk_cpufreq_hw_fdvfs_switch(unsigned int target_freq,
+ struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ struct mtk_cpufreq_priv *priv = data->parent;
+ unsigned int cpu;
+
+ target_freq = DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ);
+ for_each_cpu(cpu, policy->real_cpus) {
+ writel_relaxed(target_freq, priv->fdvfs + cpu * 4);
+ }
+}
+
static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
-
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ unsigned int target_freq;
+
+ if (data->parent->fdvfs) {
+ target_freq = policy->freq_table[index].frequency;
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ } else {
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ }
return 0;
}
static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
unsigned int index;
@@ -109,18 +170,21 @@ static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
unsigned int index;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ if (data->parent->fdvfs)
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ else
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
return policy->freq_table[index].frequency;
}
static int mtk_cpu_create_freq_table(struct platform_device *pdev,
- struct mtk_cpufreq_data *data)
+ struct mtk_cpufreq_domain *data)
{
struct device *dev = &pdev->dev;
u32 temp, i, freq, prev_freq = 0;
@@ -155,9 +219,9 @@ static int mtk_cpu_create_freq_table(struct platform_device *pdev,
static int mtk_cpu_resources_init(struct platform_device *pdev,
struct cpufreq_policy *policy,
- const u16 *offsets)
+ struct mtk_cpufreq_priv *priv)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct device *dev = &pdev->dev;
struct resource *res;
struct of_phandle_args args;
@@ -178,6 +242,15 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
index = args.args[0];
of_node_put(args.np);
+ /*
+ * In a cpufreq with hybrid DVFS, such as the MT8196, the first declared
+ * register range is for FDVFS, followed by the frequency domain MMIOs.
+ */
+ if (priv->variant->is_hybrid_dvfs)
+ index++;
+
+ data->parent = priv;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
dev_err(dev, "failed to get mem resource %d\n", index);
@@ -200,7 +273,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
data->res = res;
for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
- data->reg_bases[i] = base + offsets[i];
+ data->reg_bases[i] = base + priv->variant->reg_offsets[i];
ret = mtk_cpu_create_freq_table(pdev, data);
if (ret) {
@@ -221,7 +294,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
unsigned int latency;
int ret;
@@ -236,7 +309,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = true;
@@ -258,9 +331,9 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
struct resource *res = data->res;
void __iomem *base = data->base;
@@ -268,14 +341,12 @@ static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]);
iounmap(base);
release_mem_region(res->start, resource_size(res));
-
- return 0;
}
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
&em_cb, policy->cpus, true);
@@ -293,19 +364,48 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
.register_em = mtk_cpufreq_register_em,
.fast_switch = mtk_cpufreq_hw_fast_switch,
.name = "mtk-cpufreq-hw",
- .attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
+ struct mtk_cpufreq_priv *priv;
const void *data;
- int ret;
+ int ret, cpu;
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+
+ /* Make sure that all CPU supplies are available before proceeding. */
+ for_each_present_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
+ "Failed to get cpu%d device\n", cpu);
+
+ cpu_reg = devm_regulator_get(cpu_dev, "cpu");
+ if (IS_ERR(cpu_reg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cpu_reg),
+ "CPU%d regulator get failed\n", cpu);
+ }
+
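The pre-check above leans on dev_err_probe(), which logs only when the error is not a deferral and always passes the error through, so the probe defers quietly until the CPU regulators appear. Roughly (a simplified sketch, not the exact kernel implementation, which also records the deferral reason):

	static int dev_err_probe_sketch(struct device *dev, int err, const char *msg)
	{
		if (err != -EPROBE_DEFER)
			dev_err(dev, "error %d: %s\n", err, msg);
		/* the real helper also stores the reason shown in
		 * /sys/kernel/debug/devices_deferred */
		return err;
	}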
data = of_device_get_match_data(&pdev->dev);
if (!data)
return -EINVAL;
- platform_set_drvdata(pdev, (void *) data);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->variant = data;
+ priv->dev = &pdev->dev;
+
+ if (priv->variant->init) {
+ ret = priv->variant->init(priv);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
cpufreq_mtk_hw_driver.driver_data = pdev;
ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
@@ -315,15 +415,14 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
+static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
-
- return 0;
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
- { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_base_variant },
+ { .compatible = "mediatek,mt8196-cpufreq-hw", .data = &cpufreq_mtk_mt8196_variant },
{}
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index fef68cb2b38f..052ca7cd2f4f 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -123,7 +123,7 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_vproc > new_vproc) {
+ } else {
vproc = max(new_vproc,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(proc_reg, vproc,
@@ -313,8 +313,6 @@ out:
return ret;
}
-#define DYNAMIC_POWER "dynamic-power-coefficient"
-
static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -322,7 +320,6 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
struct dev_pm_opp *new_opp;
struct mtk_cpu_dvfs_info *info;
unsigned long freq, volt;
- struct cpufreq_policy *policy;
int ret = 0;
info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
@@ -355,12 +352,12 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
}
dev_pm_opp_put(new_opp);
- policy = cpufreq_cpu_get(info->opp_cpu);
- if (policy) {
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy)
+ = cpufreq_cpu_get(info->opp_cpu);
+ if (policy)
cpufreq_driver_target(policy, freq / 1000,
CPUFREQ_RELATION_L);
- cpufreq_cpu_put(policy);
- }
}
}
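The conversion above uses the scope-based cleanup helpers from linux/cleanup.h: cpufreq.h defines a put_cpufreq_policy free class, so the reference is dropped automatically when the variable leaves scope and the explicit cpufreq_cpu_put() on every exit path goes away. The general shape:

	void example(unsigned int cpu)
	{
		struct cpufreq_policy *policy __free(put_cpufreq_policy)
			= cpufreq_cpu_get(cpu);

		if (!policy)
			return;
		/* use policy; cpufreq_cpu_put() runs automatically on return */
	}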
@@ -392,27 +389,24 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
int ret;
cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- dev_err(cpu_dev, "failed to get cpu%d device\n", cpu);
- return -ENODEV;
- }
+ if (!cpu_dev)
+ return dev_err_probe(cpu_dev, -ENODEV, "failed to get cpu%d device\n", cpu);
info->cpu_dev = cpu_dev;
info->ccifreq_bound = false;
if (info->soc_data->ccifreq_supported) {
info->cci_dev = of_get_cci(info->cpu_dev);
- if (IS_ERR(info->cci_dev)) {
- ret = PTR_ERR(info->cci_dev);
- dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu);
- return -ENODEV;
- }
+ if (IS_ERR(info->cci_dev))
+ return dev_err_probe(cpu_dev, PTR_ERR(info->cci_dev),
+ "cpu%d: failed to get cci device\n",
+ cpu);
}
info->cpu_clk = clk_get(cpu_dev, "cpu");
if (IS_ERR(info->cpu_clk)) {
ret = PTR_ERR(info->cpu_clk);
- return dev_err_probe(cpu_dev, ret,
- "cpu%d: failed to get cpu clk\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to get cpu clk\n", cpu);
+ goto out_put_cci_dev;
}
info->inter_clk = clk_get(cpu_dev, "intermediate");
@@ -433,7 +427,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
ret = regulator_enable(info->proc_reg);
if (ret) {
- dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vproc\n", cpu);
goto out_free_proc_reg;
}
@@ -441,14 +435,17 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
info->sram_reg = regulator_get_optional(cpu_dev, "sram");
if (IS_ERR(info->sram_reg)) {
ret = PTR_ERR(info->sram_reg);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ dev_err_probe(cpu_dev, ret,
+ "cpu%d: Failed to get sram regulator\n", cpu);
goto out_disable_proc_reg;
+ }
info->sram_reg = NULL;
} else {
ret = regulator_enable(info->sram_reg);
if (ret) {
- dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable vsram\n", cpu);
goto out_free_sram_reg;
}
}
@@ -456,31 +453,34 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
if (ret) {
- dev_err(cpu_dev,
+ dev_err_probe(cpu_dev, ret,
"cpu%d: failed to get OPP-sharing information\n", cpu);
goto out_disable_sram_reg;
}
ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
if (ret) {
- dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: no OPP table\n", cpu);
goto out_disable_sram_reg;
}
ret = clk_prepare_enable(info->cpu_clk);
- if (ret)
+ if (ret) {
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable cpu clk\n", cpu);
goto out_free_opp_table;
+ }
ret = clk_prepare_enable(info->inter_clk);
- if (ret)
+ if (ret) {
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to enable inter clk\n", cpu);
goto out_disable_mux_clock;
+ }
if (info->soc_data->ccifreq_supported) {
info->vproc_on_boot = regulator_get_voltage(info->proc_reg);
if (info->vproc_on_boot < 0) {
- ret = info->vproc_on_boot;
- dev_err(info->cpu_dev,
- "invalid Vproc value: %d\n", info->vproc_on_boot);
+ ret = dev_err_probe(info->cpu_dev, info->vproc_on_boot,
+ "invalid Vproc value\n");
goto out_disable_inter_clock;
}
}
@@ -489,8 +489,8 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
rate = clk_get_rate(info->inter_clk);
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
if (IS_ERR(opp)) {
- dev_err(cpu_dev, "cpu%d: failed to get intermediate opp\n", cpu);
- ret = PTR_ERR(opp);
+ ret = dev_err_probe(cpu_dev, PTR_ERR(opp),
+ "cpu%d: failed to get intermediate opp\n", cpu);
goto out_disable_inter_clock;
}
info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
@@ -503,7 +503,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
info->opp_nb.notifier_call = mtk_cpufreq_opp_notifier;
ret = dev_pm_opp_register_notifier(cpu_dev, &info->opp_nb);
if (ret) {
- dev_err(cpu_dev, "cpu%d: failed to register opp notifier\n", cpu);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to register opp notifier\n", cpu);
goto out_disable_inter_clock;
}
@@ -553,6 +553,10 @@ out_free_inter_clock:
out_free_mux_clock:
clk_put(info->cpu_clk);
+out_put_cci_dev:
+ if (info->soc_data->ccifreq_supported)
+ put_device(info->cci_dev);
+
return ret;
}
@@ -570,6 +574,8 @@ static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
clk_put(info->inter_clk);
dev_pm_opp_of_cpumask_remove_table(&info->cpus);
dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
+ if (info->soc_data->ccifreq_supported)
+ put_device(info->cci_dev);
}
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
@@ -601,13 +607,11 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
return 0;
}
-static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
+static void mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
-
- return 0;
}
static struct cpufreq_driver mtk_cpufreq_driver = {
@@ -621,7 +625,6 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
.exit = mtk_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "mtk-cpufreq",
- .attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_probe(struct platform_device *pdev)
@@ -631,38 +634,33 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
int cpu, ret;
data = dev_get_platdata(&pdev->dev);
- if (!data) {
- dev_err(&pdev->dev,
- "failed to get mtk cpufreq platform data\n");
- return -ENODEV;
- }
+ if (!data)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "failed to get mtk cpufreq platform data\n");
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
info = mtk_cpu_dvfs_info_lookup(cpu);
if (info)
continue;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
- ret = -ENOMEM;
+ ret = dev_err_probe(&pdev->dev, -ENOMEM,
+ "Failed to allocate dvfs_info\n");
goto release_dvfs_info_list;
}
info->soc_data = data;
ret = mtk_cpu_dvfs_info_init(info, cpu);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to initialize dvfs info for cpu%d\n",
- cpu);
+ if (ret)
goto release_dvfs_info_list;
- }
list_add(&info->list_head, &dvfs_info_list);
}
ret = cpufreq_register_driver(&mtk_cpufreq_driver);
if (ret) {
- dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
+ dev_err_probe(&pdev->dev, ret, "failed to register mtk cpufreq driver\n");
goto release_dvfs_info_list;
}
@@ -709,6 +707,15 @@ static const struct mtk_cpufreq_platform_data mt7623_platform_data = {
.ccifreq_supported = false,
};
+static const struct mtk_cpufreq_platform_data mt7988_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 900000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1150000,
+ .ccifreq_supported = true,
+};
+
static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
.min_volt_shift = 100000,
.max_volt_shift = 200000,
@@ -737,11 +744,12 @@ static const struct mtk_cpufreq_platform_data mt8516_platform_data = {
};
/* List of machines supported by this driver */
-static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
+static const struct of_device_id mtk_cpufreq_machines[] __initconst __maybe_unused = {
{ .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
{ .compatible = "mediatek,mt7623", .data = &mt7623_platform_data },
+ { .compatible = "mediatek,mt7988a", .data = &mt7988_platform_data },
{ .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
@@ -756,22 +764,14 @@ MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
static int __init mtk_cpufreq_driver_init(void)
{
- struct device_node *np;
- const struct of_device_id *match;
const struct mtk_cpufreq_platform_data *data;
int err;
- np = of_find_node_by_path("/");
- if (!np)
- return -ENODEV;
-
- match = of_match_node(mtk_cpufreq_machines, np);
- of_node_put(np);
- if (!match) {
+ data = of_machine_get_match_data(mtk_cpufreq_machines);
+ if (!data) {
pr_debug("Machine is not compatible with mtk-cpufreq\n");
return -ENODEV;
}
- data = match->data;
err = platform_driver_register(&mtk_cpufreq_platdrv);
if (err)
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index 7f3cfe668f30..2aad4c04673c 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -56,7 +56,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
* it), and registers the clock notifier that will take care
* of doing the PMSU part of a frequency transition.
*/
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
int ret;
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 81649a1969b6..bbb01d93b54b 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -28,9 +28,6 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <asm/smp_plat.h>
-#include <asm/cpu.h>
-
/* OPP tolerance in percentage */
#define OPP_TOLERANCE 4
@@ -135,11 +132,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int omap_cpu_exit(struct cpufreq_policy *policy)
+static void omap_cpu_exit(struct cpufreq_policy *policy)
{
freq_table_free();
clk_put(policy->clk);
- return 0;
}
static struct cpufreq_driver omap_driver = {
@@ -151,7 +147,6 @@ static struct cpufreq_driver omap_driver = {
.exit = omap_cpu_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "omap",
- .attr = cpufreq_generic_attr,
};
static int omap_cpufreq_probe(struct platform_device *pdev)
@@ -182,11 +177,9 @@ static int omap_cpufreq_probe(struct platform_device *pdev)
return cpufreq_register_driver(&omap_driver);
}
-static int omap_cpufreq_remove(struct platform_device *pdev)
+static void omap_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&omap_driver);
-
- return 0;
}
static struct platform_driver omap_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index ef0a3216a386..69c19233fcd4 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -227,7 +227,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.init = cpufreq_p4_cpu_init,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 039a66bbe1be..a3931349360f 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -204,21 +204,19 @@ out:
return err;
}
-static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
/*
* We don't support CPU hotplug. Don't unmap after the system
* has already made it to a running state.
*/
if (system_state >= SYSTEM_RUNNING)
- return 0;
+ return;
if (sdcasr_mapbase)
iounmap(sdcasr_mapbase);
if (sdcpwr_mapbase)
iounmap(sdcpwr_mapbase);
-
- return 0;
}
static int pas_cpufreq_target(struct cpufreq_policy *policy,
@@ -247,7 +245,6 @@ static struct cpufreq_driver pas_cpufreq_driver = {
.exit = pas_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pas_cpufreq_target,
- .attr = cpufreq_generic_attr,
};
/*
@@ -271,5 +268,6 @@ static void __exit pas_cpufreq_exit(void)
module_init(pas_cpufreq_init);
module_exit(pas_cpufreq_exit);
+MODULE_DESCRIPTION("cpufreq driver for PA Semi PWRficient");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 73efbcf5513b..ac2e90a65f0c 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -232,8 +232,8 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
status = ioread16(&pcch_hdr->status);
iowrite16(0, &pcch_hdr->status);
- cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
spin_unlock(&pcc_lock);
+ cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
if (status != CMD_COMPLETE) {
pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
@@ -562,18 +562,12 @@ out:
return result;
}
-static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
static struct cpufreq_driver pcc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.get = pcc_get_freq,
.verify = pcc_cpufreq_verify,
.target = pcc_cpufreq_target,
.init = pcc_cpufreq_cpu_init,
- .exit = pcc_cpufreq_cpu_exit,
.name = "pcc-cpufreq",
};
@@ -608,15 +602,13 @@ static int __init pcc_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int pcc_cpufreq_remove(struct platform_device *pdev)
+static void pcc_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&pcc_cpufreq_driver);
pcc_clear_mapping();
free_percpu(pcc_cpu_info);
-
- return 0;
}
static struct platform_driver pcc_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index ec75e79659ac..a22c22bd693a 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -24,6 +24,7 @@
#include <linux/device.h>
#include <linux/hardirq.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/machdep.h>
#include <asm/irq.h>
@@ -119,9 +120,9 @@ static int cpu_750fx_cpu_speed(int low_speed)
/* tweak L2 for high voltage */
if (has_cpu_l2lve) {
- hid2 = mfspr(SPRN_HID2);
+ hid2 = mfspr(SPRN_HID2_750FX);
hid2 &= ~0x2000;
- mtspr(SPRN_HID2, hid2);
+ mtspr(SPRN_HID2_750FX, hid2);
}
}
#ifdef CONFIG_PPC_BOOK3S_32
@@ -130,9 +131,9 @@ static int cpu_750fx_cpu_speed(int low_speed)
if (low_speed == 1) {
/* tweak L2 for low voltage */
if (has_cpu_l2lve) {
- hid2 = mfspr(SPRN_HID2);
+ hid2 = mfspr(SPRN_HID2_750FX);
hid2 |= 0x2000;
- mtspr(SPRN_HID2, hid2);
+ mtspr(SPRN_HID2_750FX, hid2);
}
/* ramping down, set voltage last */
@@ -378,10 +379,9 @@ static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
static u32 read_gpio(struct device_node *np)
{
- const u32 *reg = of_get_property(np, "reg", NULL);
- u32 offset;
+ u64 offset;
- if (reg == NULL)
+ if (of_property_read_reg(np, 0, &offset, NULL) < 0)
return 0;
/* That works for all keylargos but shall be fixed properly
* some day... The problem is that it seems we can't rely
@@ -389,7 +389,6 @@ static u32 read_gpio(struct device_node *np)
* relative to the base of KeyLargo or to the base of the
* GPIO space, and the device-tree doesn't help.
*/
- offset = *reg;
if (offset < KEYLARGO_GPIO_LEVELS0)
offset += KEYLARGO_GPIO_LEVELS0;
return offset;
@@ -440,7 +439,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
- .attr = cpufreq_generic_attr,
.name = "powermac",
};
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 2cd2b06849a2..80897ec8f00e 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -332,7 +332,6 @@ static struct cpufreq_driver g5_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = g5_cpufreq_target,
.get = g5_cpufreq_get_speed,
- .attr = cpufreq_generic_attr,
};
@@ -505,7 +504,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
continue;
if (strcmp(loc, "CPU CLOCK"))
continue;
- if (!of_get_property(hwclock, "platform-get-frequency", NULL))
+ if (!of_property_present(hwclock, "platform-get-frequency"))
continue;
break;
}
@@ -671,4 +670,5 @@ static int __init g5_cpufreq_init(void)
module_init(g5_cpufreq_init);
+MODULE_DESCRIPTION("cpufreq driver for SMU & 970FX based G5 Macs");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 41eefef95d87..99d2244e03b0 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -219,7 +219,7 @@ have_busfreq:
}
-static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
+static void powernow_k6_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int i;
@@ -234,10 +234,9 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
cpufreq_freq_transition_begin(policy, &freqs);
powernow_k6_target(policy, i);
cpufreq_freq_transition_end(policy, &freqs, 0);
- break;
+ return;
}
}
- return 0;
}
static unsigned int powernow_k6_get(unsigned int cpu)
@@ -254,7 +253,6 @@ static struct cpufreq_driver powernow_k6_driver = {
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id powernow_k6_ids[] = {
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 5d515fc34836..31039330a3ba 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -219,13 +219,13 @@ static void change_FID(int fid)
{
union msr_fidvidctl fidvidctl;
- rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.FID != fid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.FID = fid;
fidvidctl.bits.VIDC = 0;
fidvidctl.bits.FIDC = 1;
- wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -234,13 +234,13 @@ static void change_VID(int vid)
{
union msr_fidvidctl fidvidctl;
- rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.VID != vid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.VID = vid;
fidvidctl.bits.FIDC = 0;
fidvidctl.bits.VIDC = 1;
- wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -260,7 +260,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
fid = powernow_table[index].driver_data & 0xFF;
vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
freqs.old = fsb * fid_codes[cfid] / 10;
@@ -557,7 +557,7 @@ static unsigned int powernow_get(unsigned int cpu)
if (cpu)
return 0;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
return fsb * fid_codes[cfid] / 10;
@@ -598,7 +598,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0)
return -ENODEV;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
recalibrate_cpu_khz();
@@ -644,7 +644,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int powernow_cpu_exit(struct cpufreq_policy *policy)
+static void powernow_cpu_exit(struct cpufreq_policy *policy)
{
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
if (acpi_processor_perf) {
@@ -655,7 +655,6 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
#endif
kfree(powernow_table);
- return 0;
}
static struct cpufreq_driver powernow_driver = {
@@ -668,7 +667,6 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
- .attr = cpufreq_generic_attr,
};
static int __init powernow_init(void)
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index d289036beff2..f7512b4e923e 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -482,7 +482,7 @@ static void check_supported_cpu(void *_rc)
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & P_STATE_TRANSITION_CAPABLE)
!= P_STATE_TRANSITION_CAPABLE) {
- pr_info("Power state transitions not supported\n");
+ pr_info_once("Power state transitions not supported\n");
return;
}
*rc = 0;
@@ -1089,22 +1089,21 @@ err_out:
return -ENODEV;
}
-static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
+static void powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
int cpu;
if (!data)
- return -EINVAL;
+ return;
powernow_k8_cpu_exit_acpi(data);
kfree(data->powernow_table);
kfree(data);
- for_each_cpu(cpu, pol->cpus)
+ /* pol->cpus will be empty here, use related_cpus instead. */
+ for_each_cpu(cpu, pol->related_cpus)
per_cpu(powernow_data, cpu) = NULL;
-
- return 0;
}
static void query_values_on_cpu(void *_err)
@@ -1144,7 +1143,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
- .attr = cpufreq_generic_attr,
};
static void __request_acpi_cpufreq(void)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index fddbd1ea1635..7d9a5f656de8 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -18,9 +18,9 @@
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
-#include <trace/events/power.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
@@ -29,6 +29,9 @@
#include <asm/opal.h>
#include <linux/timer.h>
+#define CREATE_TRACE_POINTS
+#include "powernv-trace.h"
+
#define POWERNV_MAX_PSTATES_ORDER 8
#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
@@ -281,7 +284,7 @@ next:
pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
- (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
+ str_enabled_disabled(powernv_pstate_info.wof_enabled));
pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
if (!pstate_ids) {
@@ -385,12 +388,8 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
__ATTR_RO(cpuinfo_nominal_freq);
-#define SCALING_BOOST_FREQS_ATTR_INDEX 2
-
static struct freq_attr *powernv_cpu_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_cpuinfo_nominal_freq,
- &cpufreq_freq_attr_scaling_boost_freqs,
NULL,
};
@@ -670,7 +669,8 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
*/
static void gpstate_timer_handler(struct timer_list *t)
{
- struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
+ struct global_pstate_info *gpstates = timer_container_of(gpstates, t,
+ timer);
struct cpufreq_policy *policy = gpstates->policy;
int gpstate_idx, lpstate_idx;
unsigned long val;
@@ -692,7 +692,7 @@ static void gpstate_timer_handler(struct timer_list *t)
}
/*
- * If PMCR was last updated was using fast_swtich then
+ * If PMCR was last updated using fast_switch then
* We may have wrong in gpstate->last_lpstate_idx
* value. Hence, read from PMCR to get correct data.
*/
@@ -805,7 +805,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
if (gpstate_idx != new_index)
queue_gpstate_timer(gpstates);
else
- del_timer_sync(&gpstates->timer);
+ timer_delete_sync(&gpstates->timer);
gpstates_done:
freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
@@ -874,7 +874,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct powernv_smp_call_data freq_data;
struct global_pstate_info *gpstates = policy->driver_data;
@@ -883,11 +883,9 @@ static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
if (gpstates)
- del_timer_sync(&gpstates->timer);
+ timer_delete_sync(&gpstates->timer);
kfree(policy->driver_data);
-
- return 0;
}
static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
@@ -1129,9 +1127,7 @@ static int __init powernv_cpufreq_init(void)
goto out;
if (powernv_pstate_info.wof_enabled)
- powernv_cpufreq_driver.boost_enabled = true;
- else
- powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
+ powernv_cpufreq_driver.set_boost = cpufreq_boost_set_sw;
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (rc) {
@@ -1139,9 +1135,6 @@ static int __init powernv_cpufreq_init(void)
goto cleanup;
}
- if (powernv_pstate_info.wof_enabled)
- cpufreq_enable_boost_support();
-
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
@@ -1162,5 +1155,6 @@ static void __exit powernv_cpufreq_exit(void)
}
module_exit(powernv_cpufreq_exit);
+MODULE_DESCRIPTION("cpufreq driver for IBM/OpenPOWER powernv systems");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");
diff --git a/drivers/cpufreq/powernv-trace.h b/drivers/cpufreq/powernv-trace.h
new file mode 100644
index 000000000000..8cadb7c9427b
--- /dev/null
+++ b/drivers/cpufreq/powernv-trace.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_POWERNV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _POWERNV_TRACE_H
+
+#include <linux/cpufreq.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_events.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+TRACE_EVENT(powernv_throttle,
+
+ TP_PROTO(int chip_id, const char *reason, int pmax),
+
+ TP_ARGS(chip_id, reason, pmax),
+
+ TP_STRUCT__entry(
+ __field(int, chip_id)
+ __string(reason, reason)
+ __field(int, pmax)
+ ),
+
+ TP_fast_assign(
+ __entry->chip_id = chip_id;
+ __assign_str(reason);
+ __entry->pmax = pmax;
+ ),
+
+ TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
+ __entry->pmax, __get_str(reason))
+);
+
+#endif /* _POWERNV_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE powernv-trace
+
+#include <trace/define_trace.h>
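With CREATE_TRACE_POINTS defined before including this header (as done in powernv-cpufreq.c above), the event expands into a trace_powernv_throttle() helper matching TP_PROTO. A sketch of a call site (the argument names are illustrative, not the driver's exact variables):

	trace_powernv_throttle(chip_id, "Pmax reduced due to OCC", pmax);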
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
deleted file mode 100644
index e3313ce63b38..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * cpufreq driver for the cell processor
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "ppc_cbe_cpufreq.h"
-
-/* the CBE supports an 8 step frequency scaling */
-static struct cpufreq_frequency_table cbe_freqs[] = {
- {0, 1, 0},
- {0, 2, 0},
- {0, 3, 0},
- {0, 4, 0},
- {0, 5, 0},
- {0, 6, 0},
- {0, 8, 0},
- {0, 10, 0},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-/*
- * hardware specific functions
- */
-
-static int set_pmode(unsigned int cpu, unsigned int slow_mode)
-{
- int rc;
-
- if (cbe_cpufreq_has_pmi)
- rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
- else
- rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
-
- pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
-
- return rc;
-}
-
-/*
- * cpufreq functions
- */
-
-static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *pos;
- const u32 *max_freqp;
- u32 max_freq;
- int cur_pmode;
- struct device_node *cpu;
-
- cpu = of_get_cpu_node(policy->cpu, NULL);
-
- if (!cpu)
- return -ENODEV;
-
- pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- /*
- * Let's check we can actually get to the CELL regs
- */
- if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
- !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
- pr_info("invalid CBE regs pointers for cpufreq\n");
- of_node_put(cpu);
- return -EINVAL;
- }
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-
- of_node_put(cpu);
-
- if (!max_freqp)
- return -EINVAL;
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
- pr_debug("max clock-frequency is at %u kHz\n", max_freq);
- pr_debug("initializing frequency table\n");
-
- /* initialize frequency table */
- cpufreq_for_each_entry(pos, cbe_freqs) {
- pos->frequency = max_freq / pos->driver_data;
- pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency);
- }
-
- /* if DEBUG is enabled set_pmode() measures the latency
- * of a transition */
- policy->cpuinfo.transition_latency = 25000;
-
- cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
- pr_debug("current pmode is at %d\n",cur_pmode);
-
- policy->cur = cbe_freqs[cur_pmode].frequency;
-
-#ifdef CONFIG_SMP
- cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
-#endif
-
- policy->freq_table = cbe_freqs;
- cbe_cpufreq_pmi_policy_init(policy);
- return 0;
-}
-
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cbe_cpufreq_pmi_policy_exit(policy);
- return 0;
-}
-
-static int cbe_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int cbe_pmode_new)
-{
- pr_debug("setting frequency for cpu %d to %d kHz, " \
- "1/%d of max frequency\n",
- policy->cpu,
- cbe_freqs[cbe_pmode_new].frequency,
- cbe_freqs[cbe_pmode_new].driver_data);
-
- return set_pmode(policy->cpu, cbe_pmode_new);
-}
-
-static struct cpufreq_driver cbe_cpufreq_driver = {
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = cbe_cpufreq_target,
- .init = cbe_cpufreq_cpu_init,
- .exit = cbe_cpufreq_cpu_exit,
- .name = "cbe-cpufreq",
- .flags = CPUFREQ_CONST_LOOPS,
-};
-
-/*
- * module init and destoy
- */
-
-static int __init cbe_cpufreq_init(void)
-{
- int ret;
-
- if (!machine_is(cell))
- return -ENODEV;
-
- cbe_cpufreq_pmi_init();
-
- ret = cpufreq_register_driver(&cbe_cpufreq_driver);
- if (ret)
- cbe_cpufreq_pmi_exit();
-
- return ret;
-}
-
-static void __exit cbe_cpufreq_exit(void)
-{
- cpufreq_unregister_driver(&cbe_cpufreq_driver);
- cbe_cpufreq_pmi_exit();
-}
-
-module_init(cbe_cpufreq_init);
-module_exit(cbe_cpufreq_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
deleted file mode 100644
index 00cd8633b0d9..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ppc_cbe_cpufreq.h
- *
- * This file contains the definitions used by the cbe_cpufreq driver.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- */
-
-#include <linux/cpufreq.h>
-#include <linux/types.h>
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
-int cbe_cpufreq_get_pmode(int cpu);
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
-
-#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
-extern bool cbe_cpufreq_has_pmi;
-void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy);
-void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy);
-void cbe_cpufreq_pmi_init(void);
-void cbe_cpufreq_pmi_exit(void);
-#else
-#define cbe_cpufreq_has_pmi (0)
-static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {}
-static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {}
-static inline void cbe_cpufreq_pmi_init(void) {}
-static inline void cbe_cpufreq_pmi_exit(void) {}
-#endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
deleted file mode 100644
index 04830cd95333..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pervasive backend for the cbe_cpufreq driver
- *
- * This driver makes use of the pervasive unit to
- * engage the desired frequency.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <asm/machdep.h>
-#include <asm/hw_irq.h>
-#include <asm/cell-regs.h>
-
-#include "ppc_cbe_cpufreq.h"
-
-/* to write to MIC register */
-static u64 MIC_Slow_Fast_Timer_table[] = {
- [0 ... 7] = 0x007fc00000000000ull,
-};
-
-/* more values for the MIC */
-static u64 MIC_Slow_Next_Timer_table[] = {
- 0x0000240000000000ull,
- 0x0000268000000000ull,
- 0x000029C000000000ull,
- 0x00002D0000000000ull,
- 0x0000300000000000ull,
- 0x0000334000000000ull,
- 0x000039C000000000ull,
- 0x00003FC000000000ull,
-};
-
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
-{
- struct cbe_pmd_regs __iomem *pmd_regs;
- struct cbe_mic_tm_regs __iomem *mic_tm_regs;
- unsigned long flags;
- u64 value;
-#ifdef DEBUG
- long time;
-#endif
-
- local_irq_save(flags);
-
- mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
-
-#ifdef DEBUG
- time = jiffies;
-#endif
-
- out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
-
- out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
-
- value = in_be64(&pmd_regs->pmcr);
- /* set bits to zero */
- value &= 0xFFFFFFFFFFFFFFF8ull;
- /* set bits to next pmode */
- value |= pmode;
-
- out_be64(&pmd_regs->pmcr, value);
-
-#ifdef DEBUG
- /* wait until new pmode appears in status register */
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- while (value != pmode) {
- cpu_relax();
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- }
-
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "pervasive unit\n", time);
-#endif
- local_irq_restore(flags);
-
- return 0;
-}
-
-
-int cbe_cpufreq_get_pmode(int cpu)
-{
- int ret;
- struct cbe_pmd_regs __iomem *pmd_regs;
-
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
- ret = in_be64(&pmd_regs->pmsr) & 0x07;
-
- return ret;
-}
-
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
deleted file mode 100644
index 4fba3637b115..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pmi backend for the cbe_cpufreq driver
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <linux/pm_qos.h>
-#include <linux/slab.h>
-
-#include <asm/processor.h>
-#include <asm/pmi.h>
-#include <asm/cell-regs.h>
-
-#ifdef DEBUG
-#include <asm/time.h>
-#endif
-
-#include "ppc_cbe_cpufreq.h"
-
-bool cbe_cpufreq_has_pmi = false;
-EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
-
-/*
- * hardware specific functions
- */
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
-{
- int ret;
- pmi_message_t pmi_msg;
-#ifdef DEBUG
- long time;
-#endif
- pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
- pmi_msg.data1 = cbe_cpu_to_node(cpu);
- pmi_msg.data2 = pmode;
-
-#ifdef DEBUG
- time = jiffies;
-#endif
- pmi_send_message(pmi_msg);
-
-#ifdef DEBUG
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "PMI\n", time);
-#endif
- ret = pmi_msg.data2;
- pr_debug("PMI returned slow mode %d\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
-
-
-static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
-{
- struct cpufreq_policy *policy;
- struct freq_qos_request *req;
- u8 node, slow_mode;
- int cpu, ret;
-
- BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
-
- node = pmi_msg.data1;
- slow_mode = pmi_msg.data2;
-
- cpu = cbe_node_to_cpu(node);
-
- pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
-
- policy = cpufreq_cpu_get(cpu);
- if (!policy) {
- pr_warn("cpufreq policy not found cpu%d\n", cpu);
- return;
- }
-
- req = policy->driver_data;
-
- ret = freq_qos_update_request(req,
- policy->freq_table[slow_mode].frequency);
- if (ret < 0)
- pr_warn("Failed to update freq constraint: %d\n", ret);
- else
- pr_debug("limiting node %d to slow mode %d\n", node, slow_mode);
-
- cpufreq_cpu_put(policy);
-}
-
-static struct pmi_handler cbe_pmi_handler = {
- .type = PMI_TYPE_FREQ_CHANGE,
- .handle_pmi_message = cbe_cpufreq_handle_pmi,
-};
-
-void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
-{
- struct freq_qos_request *req;
- int ret;
-
- if (!cbe_cpufreq_has_pmi)
- return;
-
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return;
-
- ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
- policy->freq_table[0].frequency);
- if (ret < 0) {
- pr_err("Failed to add freq constraint (%d)\n", ret);
- kfree(req);
- return;
- }
-
- policy->driver_data = req;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
-
-void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
-{
- struct freq_qos_request *req = policy->driver_data;
-
- if (cbe_cpufreq_has_pmi) {
- freq_qos_remove_request(req);
- kfree(req);
- }
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit);
-
-void cbe_cpufreq_pmi_init(void)
-{
- if (!pmi_register_handler(&cbe_pmi_handler))
- cbe_cpufreq_has_pmi = true;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init);
-
-void cbe_cpufreq_pmi_exit(void)
-{
- pmi_unregister_handler(&cbe_pmi_handler);
- cbe_cpufreq_has_pmi = false;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index f2830371d25f..8422704a3b10 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -28,7 +29,7 @@
#define GT_IRQ_STATUS BIT(2)
-#define MAX_FREQ_DOMAINS 3
+#define MAX_FREQ_DOMAINS 4
struct qcom_cpufreq_soc_data {
u32 reg_enable;
@@ -142,14 +143,12 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
}
/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_get_freq(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data;
const struct qcom_cpufreq_soc_data *soc_data;
- struct cpufreq_policy *policy;
unsigned int index;
- policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
@@ -162,12 +161,10 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
return policy->freq_table[index].frequency;
}
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+static unsigned int __qcom_cpufreq_hw_get(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
@@ -176,7 +173,12 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
if (data->throttle_irq >= 0)
return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
- return qcom_cpufreq_get_freq(cpu);
+ return qcom_cpufreq_get_freq(policy);
+}
+
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+{
+ return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu));
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
@@ -304,7 +306,7 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
struct of_phandle_args args;
int cpu, ret;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_np = of_cpu_device_node_get(cpu);
if (!cpu_np)
continue;
@@ -347,8 +349,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
throttled_freq = freq_hz / HZ_PER_KHZ;
- /* Update thermal pressure (the boost frequencies are accepted) */
- arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+ /* Update HW pressure (the boost frequencies are accepted) */
+ arch_update_hw_pressure(policy->related_cpus, throttled_freq);
/*
* In the unlikely case policy is unregistered do not enable
@@ -362,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
* If h/w throttled frequency is higher than what cpufreq has requested
* for, then stop polling and switch back to interrupt mechanism.
*/
- if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
+ if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
enable_irq(data->throttle_irq);
else
mod_delayed_work(system_highpri_wq, &data->throttle_work,
@@ -440,7 +442,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return data->throttle_irq;
data->cancel_throttle = false;
- data->policy = policy;
mutex_init(&data->throttle_lock);
INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
@@ -551,6 +552,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = data;
policy->dvfs_possible_from_any_cpu = true;
+ data->policy = policy;
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
@@ -564,16 +566,10 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret)
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- }
-
return qcom_cpufreq_hw_lmh_init(policy, index);
}
-static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+static void qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev = get_cpu_device(policy->cpu);
struct qcom_cpufreq_data *data = policy->driver_data;
@@ -583,8 +579,6 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
qcom_cpufreq_hw_lmh_exit(data);
kfree(policy->freq_table);
kfree(data);
-
- return 0;
}
static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
@@ -595,12 +589,6 @@ static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
enable_irq(data->throttle_irq);
}
-static struct freq_attr *qcom_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- &cpufreq_freq_attr_scaling_boost_freqs,
- NULL
-};
-
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -615,19 +603,32 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
- .attr = qcom_cpufreq_hw_attr,
.ready = qcom_cpufreq_ready,
+ .set_boost = cpufreq_boost_set_sw,
};
static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);
- return qcom_lmh_get_throttle_freq(data);
+ return __qcom_cpufreq_hw_get(data->policy) * HZ_PER_KHZ;
+}
+
+/*
+ * Since we cannot determine the rate closest to the target rate, just
+ * return the actual rate the clock is running at. This is needed to
+ * make the clk_set_rate() API work properly.
+ */
+static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0);
+
+ return 0;
}
static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
.recalc_rate = qcom_cpufreq_hw_recalc_rate,
+ .determine_rate = qcom_cpufreq_hw_determine_rate,
};
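For clock consumers this means any rate request "rounds" to whatever the LUT is currently delivering, so clk_set_rate() succeeds without programming hardware through the clk framework. A sketch of the consumer-side effect (cpu_clk is an assumed handle to this clock):

	/* both calls report the live LUT frequency, in Hz */
	long rounded = clk_round_rate(cpu_clk, 1800000000);
	int ret = clk_set_rate(cpu_clk, 1800000000);	/* succeeds without touching hardware */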
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
@@ -730,11 +731,9 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
return ret;
}
-static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
+static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
-
- return 0;
}
static struct platform_driver qcom_cpufreq_hw_driver = {
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index a88b6fe5db50..81e16b5a0245 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -22,15 +22,29 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <dt-bindings/arm/qcom,ids.h>
+enum ipq806x_versions {
+ IPQ8062_VERSION = 0,
+ IPQ8064_VERSION,
+ IPQ8065_VERSION,
+};
+
+#define IPQ6000_VERSION BIT(2)
+
+enum ipq8074_versions {
+ IPQ8074_HAWKEYE_VERSION = 0,
+ IPQ8074_ACORN_VERSION,
+};
+
struct qcom_cpufreq_drv;
struct qcom_cpufreq_match_data {
@@ -38,19 +52,43 @@ struct qcom_cpufreq_match_data {
struct nvmem_cell *speedbin_nvmem,
char **pvs_name,
struct qcom_cpufreq_drv *drv);
- const char **genpd_names;
+ const char **pd_names;
+ unsigned int num_pd_names;
+};
+
+struct qcom_cpufreq_drv_cpu {
+ int opp_token;
+ struct dev_pm_domain_list *pd_list;
};
struct qcom_cpufreq_drv {
- int *opp_tokens;
u32 versions;
const struct qcom_cpufreq_match_data *data;
+ struct qcom_cpufreq_drv_cpu cpus[];
};
static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
+static int qcom_cpufreq_simple_get_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ u8 *speedbin;
+
+ *pvs_name = NULL;
+ speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ dev_dbg(cpu_dev, "speedbin: %d\n", *speedbin);
+ drv->versions = 1 << *speedbin;
+ kfree(speedbin);
+ return 0;
+}
+
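drv->versions is a bitmask consumed by the OPP core: later in probe it is passed as supported_hw, so only OPPs whose opp-supported-hw property intersects the mask stay enabled. The wiring, matching the dev_pm_opp_set_config() hunk further down:

	struct dev_pm_opp_config config = {
		.supported_hw = &drv->versions,
		.supported_hw_count = 1,
	};

	drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);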
static void get_krait_bin_format_a(struct device *cpu_dev,
- int *speed, int *pvs, int *pvs_ver,
+ int *speed, int *pvs,
u8 *buf)
{
u32 pte_efuse;
@@ -149,8 +187,23 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
switch (msm_id) {
case QCOM_ID_MSM8996:
case QCOM_ID_APQ8096:
+ case QCOM_ID_IPQ5332:
+ case QCOM_ID_IPQ5322:
+ case QCOM_ID_IPQ5312:
+ case QCOM_ID_IPQ5302:
+ case QCOM_ID_IPQ5300:
+ case QCOM_ID_IPQ5321:
+ case QCOM_ID_IPQ9514:
+ case QCOM_ID_IPQ9550:
+ case QCOM_ID_IPQ9554:
+ case QCOM_ID_IPQ9570:
+ case QCOM_ID_IPQ9574:
drv->versions = 1 << (unsigned int)(*speedbin);
break;
+ case QCOM_ID_IPQ5424:
+ case QCOM_ID_IPQ5404:
+ drv->versions = (*speedbin == 0x3b) ? BIT(1) : BIT(0);
+ break;
case QCOM_ID_MSM8996SG:
case QCOM_ID_APQ8096SG:
drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
@@ -181,8 +234,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
switch (len) {
case 4:
- get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver,
- speedbin);
+ get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin);
break;
case 8:
get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver,
@@ -204,6 +256,181 @@ len_error:
return ret;
}
+static const struct of_device_id qcom_cpufreq_ipq806x_match_list[] __maybe_unused = {
+ { .compatible = "qcom,ipq8062", .data = (const void *)QCOM_ID_IPQ8062 },
+ { .compatible = "qcom,ipq8064", .data = (const void *)QCOM_ID_IPQ8064 },
+ { .compatible = "qcom,ipq8065", .data = (const void *)QCOM_ID_IPQ8065 },
+ { .compatible = "qcom,ipq8066", .data = (const void *)QCOM_ID_IPQ8066 },
+ { .compatible = "qcom,ipq8068", .data = (const void *)QCOM_ID_IPQ8068 },
+ { .compatible = "qcom,ipq8069", .data = (const void *)QCOM_ID_IPQ8069 },
+};
+
+static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ int msm_id = -1, ret = 0;
+ int speed = 0, pvs = 0;
+ u8 *speedbin;
+ size_t len;
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ if (len != 4) {
+ dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin);
+
+ ret = qcom_smem_get_soc_id(&msm_id);
+ if (ret == -ENODEV) {
+ const struct of_device_id *match;
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ /* Fall back to a compatible-string match when SMEM is not initialized */
+ match = of_match_node(qcom_cpufreq_ipq806x_match_list, root);
+ of_node_put(root);
+ if (!match) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ /* We found a matching device, get the msm_id from the data entry */
+ msm_id = (int)(uintptr_t)match->data;
+ ret = 0;
+ } else if (ret) {
+ goto exit;
+ }
+
+ switch (msm_id) {
+ case QCOM_ID_IPQ8062:
+ drv->versions = BIT(IPQ8062_VERSION);
+ break;
+ case QCOM_ID_IPQ8064:
+ case QCOM_ID_IPQ8066:
+ case QCOM_ID_IPQ8068:
+ drv->versions = BIT(IPQ8064_VERSION);
+ break;
+ case QCOM_ID_IPQ8065:
+ case QCOM_ID_IPQ8069:
+ drv->versions = BIT(IPQ8065_VERSION);
+ break;
+ default:
+ dev_err(cpu_dev,
+ "SoC ID %u is not part of IPQ8064 family, limiting to 1.0GHz!\n",
+ msm_id);
+ drv->versions = BIT(IPQ8062_VERSION);
+ break;
+ }
+
+ /* IPQ8064 speed is never fused. Only pvs values are fused. */
+ snprintf(*pvs_name, sizeof("speed0-pvsXX"), "speed0-pvs%d", pvs);
+
+exit:
+ kfree(speedbin);
+ return ret;
+}
+
+static int qcom_cpufreq_ipq6018_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ u32 msm_id;
+ int ret;
+ u8 *speedbin;
+ *pvs_name = NULL;
+
+ ret = qcom_smem_get_soc_id(&msm_id);
+ if (ret)
+ return ret;
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ switch (msm_id) {
+ case QCOM_ID_IPQ6005:
+ case QCOM_ID_IPQ6010:
+ case QCOM_ID_IPQ6018:
+ case QCOM_ID_IPQ6028:
+ /*
+ * Fuse Value    Freq       BIT to set
+ * ---------------------------------
+ *    2’b0      No Limit     BIT(0)
+ *    2’b1      1.5 GHz      BIT(1)
+ */
+ drv->versions = 1 << (unsigned int)(*speedbin);
+ break;
+ case QCOM_ID_IPQ6000:
+ /*
+ * IPQ6018 family only has one bit to advertise the CPU
+ * speed-bin, but that is not enough for IPQ6000 which
+ * is only rated up to 1.2GHz.
+ * So for IPQ6000 manually set BIT(2) based on SMEM ID.
+ */
+ drv->versions = IPQ6000_VERSION;
+ break;
+ default:
+ dev_err(cpu_dev,
+ "SoC ID %u is not part of IPQ6018 family, limiting to 1.2GHz!\n",
+ msm_id);
+ drv->versions = IPQ6000_VERSION;
+ break;
+ }
+
+ kfree(speedbin);
+ return 0;
+}
+
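A worked example of the mapping above, as a sketch:

	u8 fuse = 1;			/* fused speed bin */
	drv->versions = 1 << fuse;	/* == BIT(1): capped at 1.5 GHz */
	/* IPQ6000 ignores the fuse and forces IPQ6000_VERSION == BIT(2) */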
+static int qcom_cpufreq_ipq8074_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ u32 msm_id;
+ int ret;
+ *pvs_name = NULL;
+
+ ret = qcom_smem_get_soc_id(&msm_id);
+ if (ret)
+ return ret;
+
+ switch (msm_id) {
+ case QCOM_ID_IPQ8070A:
+ case QCOM_ID_IPQ8071A:
+ case QCOM_ID_IPQ8172:
+ case QCOM_ID_IPQ8173:
+ case QCOM_ID_IPQ8174:
+ drv->versions = BIT(IPQ8074_ACORN_VERSION);
+ break;
+ case QCOM_ID_IPQ8072A:
+ case QCOM_ID_IPQ8074A:
+ case QCOM_ID_IPQ8076A:
+ case QCOM_ID_IPQ8078A:
+ drv->versions = BIT(IPQ8074_HAWKEYE_VERSION);
+ break;
+ default:
+ dev_err(cpu_dev,
+ "SoC ID %u is not part of IPQ8074 family, limiting to 1.4GHz!\n",
+ msm_id);
+ drv->versions = BIT(IPQ8074_ACORN_VERSION);
+ break;
+ }
+
+ return 0;
+}
+
static const struct qcom_cpufreq_match_data match_data_kryo = {
.get_version = qcom_cpufreq_kryo_name_version,
};
@@ -212,17 +439,45 @@ static const struct qcom_cpufreq_match_data match_data_krait = {
.get_version = qcom_cpufreq_krait_name_version,
};
-static const char *qcs404_genpd_names[] = { "cpr", NULL };
+static const struct qcom_cpufreq_match_data match_data_msm8909 = {
+ .get_version = qcom_cpufreq_simple_get_version,
+ .pd_names = (const char *[]) { "perf" },
+ .num_pd_names = 1,
+};
static const struct qcom_cpufreq_match_data match_data_qcs404 = {
- .genpd_names = qcs404_genpd_names,
+ .pd_names = (const char *[]) { "cpr" },
+ .num_pd_names = 1,
+};
+
+static const struct qcom_cpufreq_match_data match_data_ipq6018 = {
+ .get_version = qcom_cpufreq_ipq6018_name_version,
+};
+
+static const struct qcom_cpufreq_match_data match_data_ipq8064 = {
+ .get_version = qcom_cpufreq_ipq8064_name_version,
+};
+
+static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
+ .get_version = qcom_cpufreq_ipq8074_name_version,
};
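+
+/*
+ * Mark each attached power-domain device of the given CPU as lying on
+ * the awake path, so its domain is kept powered during system suspend.
+ */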
+static void qcom_cpufreq_suspend_pd_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
+{
+ struct dev_pm_domain_list *pd_list = drv->cpus[cpu].pd_list;
+ int i;
+
+ if (!pd_list)
+ return;
+
+ for (i = 0; i < pd_list->num_pds; i++)
+ device_set_awake_path(pd_list->pd_devs[i]);
+}
+
static int qcom_cpufreq_probe(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv;
struct nvmem_cell *speedbin_nvmem;
- struct device_node *np;
struct device *cpu_dev;
char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
char *pvs_name = pvs_name_buffer;
@@ -234,53 +489,42 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (!cpu_dev)
return -ENODEV;
- np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ struct device_node *np __free(device_node) =
+ dev_pm_opp_of_get_opp_desc_node(cpu_dev);
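+ /* __free(device_node) drops the reference via of_node_put() on scope exit */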
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
- if (!ret) {
- of_node_put(np);
+ ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu") ||
+ of_device_is_compatible(np, "operating-points-v2-krait-cpu");
+ if (!ret)
return -ENOENT;
- }
- drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+ drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),
+ GFP_KERNEL);
if (!drv)
return -ENOMEM;
match = pdev->dev.platform_data;
drv->data = match->data;
- if (!drv->data) {
- ret = -ENODEV;
- goto free_drv;
- }
+ if (!drv->data)
+ return -ENODEV;
if (drv->data->get_version) {
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
- if (IS_ERR(speedbin_nvmem)) {
- ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
- "Could not get nvmem cell\n");
- goto free_drv;
- }
+ if (IS_ERR(speedbin_nvmem))
+ return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+ "Could not get nvmem cell\n");
ret = drv->data->get_version(cpu_dev,
speedbin_nvmem, &pvs_name, drv);
if (ret) {
nvmem_cell_put(speedbin_nvmem);
- goto free_drv;
+ return ret;
}
nvmem_cell_put(speedbin_nvmem);
}
- of_node_put(np);
- drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
- GFP_KERNEL);
- if (!drv->opp_tokens) {
- ret = -ENOMEM;
- goto free_drv;
- }
-
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@@ -299,19 +543,28 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
config.prop_name = pvs_name;
}
- if (drv->data->genpd_names) {
- config.genpd_names = drv->data->genpd_names;
- config.virt_devs = NULL;
- }
-
- if (config.supported_hw || config.genpd_names) {
- drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
- if (drv->opp_tokens[cpu] < 0) {
- ret = drv->opp_tokens[cpu];
+ if (config.supported_hw) {
+ drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);
+ if (drv->cpus[cpu].opp_token < 0) {
+ ret = drv->cpus[cpu].opp_token;
dev_err(cpu_dev, "Failed to set OPP config\n");
goto free_opp;
}
}
+
+ if (drv->data->pd_names) {
+ struct dev_pm_domain_attach_data attach_data = {
+ .pd_names = drv->data->pd_names,
+ .num_pd_names = drv->data->num_pd_names,
+ .pd_flags = PD_FLAG_DEV_LINK_ON |
+ PD_FLAG_REQUIRED_OPP,
+ };
+
+ ret = dev_pm_domain_attach_list(cpu_dev, &attach_data,
+ &drv->cpus[cpu].pd_list);
+ if (ret < 0)
+ goto free_opp;
+ }
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
@@ -325,45 +578,60 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
dev_err(cpu_dev, "Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu)
- dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
- kfree(drv->opp_tokens);
-free_drv:
- kfree(drv);
-
+ for_each_present_cpu(cpu) {
+ dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
+ dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
+ }
return ret;
}
-static int qcom_cpufreq_remove(struct platform_device *pdev)
+static void qcom_cpufreq_remove(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv = platform_get_drvdata(pdev);
unsigned int cpu;
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu)
- dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+ for_each_present_cpu(cpu) {
+ dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
+ dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
+ }
+}
+
+static int qcom_cpufreq_suspend(struct device *dev)
+{
+ struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
+ unsigned int cpu;
- kfree(drv->opp_tokens);
- kfree(drv);
+ for_each_present_cpu(cpu)
+ qcom_cpufreq_suspend_pd_devs(drv, cpu);
return 0;
}
+static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL);
+
static struct platform_driver qcom_cpufreq_driver = {
.probe = qcom_cpufreq_probe,
.remove = qcom_cpufreq_remove,
.driver = {
.name = "qcom-cpufreq-nvmem",
+ .pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops),
},
};
-static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
+static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_unused = {
{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
+ { .compatible = "qcom,msm8909", .data = &match_data_msm8909 },
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
- { .compatible = "qcom,ipq8064", .data = &match_data_krait },
+ { .compatible = "qcom,ipq5332", .data = &match_data_kryo },
+ { .compatible = "qcom,ipq5424", .data = &match_data_kryo },
+ { .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
+ { .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
+ { .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 },
{ .compatible = "qcom,apq8064", .data = &match_data_krait },
+ { .compatible = "qcom,ipq9574", .data = &match_data_kryo },
{ .compatible = "qcom,msm8974", .data = &match_data_krait },
{ .compatible = "qcom,msm8960", .data = &match_data_krait },
{},
@@ -378,7 +646,7 @@ MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list);
*/
static int __init qcom_cpufreq_init(void)
{
- struct device_node *np = of_find_node_by_path("/");
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
int ret;
@@ -386,7 +654,6 @@ static int __init qcom_cpufreq_init(void)
return -ENODEV;
match = of_match_node(qcom_cpufreq_match_list, np);
- of_node_put(np);
if (!match)
return -ENODEV;
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 573b417e1483..8d1f5ac59132 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -225,7 +225,7 @@ err_np:
return -ENODEV;
}
-static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cpu_data *data = policy->driver_data;
@@ -233,8 +233,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
kfree(data->table);
kfree(data);
policy->driver_data = NULL;
-
- return 0;
}
static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
@@ -256,7 +254,6 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
- .attr = cpufreq_generic_attr,
};
static const struct of_device_id qoriq_cpufreq_blacklist[] = {
@@ -288,11 +285,9 @@ static int qoriq_cpufreq_probe(struct platform_device *pdev)
return 0;
}
-static int qoriq_cpufreq_remove(struct platform_device *pdev)
+static void qoriq_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&qoriq_cpufreq_driver);
-
- return 0;
}
static struct platform_driver qoriq_cpufreq_platform_driver = {
diff --git a/drivers/cpufreq/raspberrypi-cpufreq.c b/drivers/cpufreq/raspberrypi-cpufreq.c
index 2bc7d9734272..5050932954e3 100644
--- a/drivers/cpufreq/raspberrypi-cpufreq.c
+++ b/drivers/cpufreq/raspberrypi-cpufreq.c
@@ -65,7 +65,7 @@ remove_opp:
return ret;
}
-static int raspberrypi_cpufreq_remove(struct platform_device *pdev)
+static void raspberrypi_cpufreq_remove(struct platform_device *pdev)
{
struct device *cpu_dev;
@@ -74,8 +74,6 @@ static int raspberrypi_cpufreq_remove(struct platform_device *pdev)
dev_pm_opp_remove_all_dynamic(cpu_dev);
platform_device_unregister(cpufreq_dt);
-
- return 0;
}
/*
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
new file mode 100644
index 000000000000..31e07f0279db
--- /dev/null
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust based implementation of the cpufreq-dt driver.
+
+use kernel::{
+ c_str,
+ clk::Clk,
+ cpu, cpufreq,
+ cpumask::CpumaskVar,
+ device::{Core, Device},
+ error::code::*,
+ macros::vtable,
+ module_platform_driver, of, opp, platform,
+ prelude::*,
+ str::CString,
+ sync::Arc,
+};
+
+/// Finds exact supply name from the OF node.
+fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
+ let prop_name = CString::try_from_fmt(fmt!("{name}-supply")).ok()?;
+ dev.fwnode()?
+ .property_present(&prop_name)
+ .then(|| CString::try_from_fmt(fmt!("{name}")).ok())
+ .flatten()
+}
+
+/// Finds supply name for the CPU from DT.
+fn find_supply_names(dev: &Device, cpu: cpu::CpuId) -> Option<KVec<CString>> {
+ // Try "cpu0" for older DTs, fallback to "cpu".
+ (cpu.as_u32() == 0)
+ .then(|| find_supply_name_exact(dev, "cpu0"))
+ .flatten()
+ .or_else(|| find_supply_name_exact(dev, "cpu"))
+ .and_then(|name| kernel::kvec![name].ok())
+}
+
+/// Represents the cpufreq dt device.
+struct CPUFreqDTDevice {
+ opp_table: opp::Table,
+ freq_table: opp::FreqTable,
+ _mask: CpumaskVar,
+ _token: Option<opp::ConfigToken>,
+ _clk: Clk,
+}
+
+#[derive(Default)]
+struct CPUFreqDTDriver;
+
+#[vtable]
+impl opp::ConfigOps for CPUFreqDTDriver {}
+
+#[vtable]
+impl cpufreq::Driver for CPUFreqDTDriver {
+ const NAME: &'static CStr = c_str!("cpufreq-dt");
+ const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV;
+ const BOOST_ENABLED: bool = true;
+
+ type PData = Arc<CPUFreqDTDevice>;
+
+ fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> {
+ let cpu = policy.cpu();
+ // SAFETY: The CPU device is only used during init and won't get hot-unplugged. The cpufreq
+ // core registers with CPU notifiers, and neither the cpufreq core nor the driver uses the
+ // CPU device once the CPU is hot-unplugged.
+ let dev = unsafe { cpu::from_cpu(cpu)? };
+ let mut mask = CpumaskVar::new_zero(GFP_KERNEL)?;
+
+ mask.set(cpu);
+
+ let token = find_supply_names(dev, cpu)
+ .map(|names| {
+ opp::Config::<Self>::new()
+ .set_regulator_names(names)?
+ .set(dev)
+ })
+ .transpose()?;
+
+ // Get OPP-sharing information from "operating-points-v2" bindings.
+ let fallback = match opp::Table::of_sharing_cpus(dev, &mut mask) {
+ Ok(()) => false,
+ Err(e) if e == ENOENT => {
+ // "operating-points-v2" not supported. If the platform hasn't
+ // set sharing CPUs, fallback to all CPUs share the `Policy`
+ // for backward compatibility.
+ opp::Table::sharing_cpus(dev, &mut mask).is_err()
+ }
+ Err(e) => return Err(e),
+ };
+
+ // Initialize OPP tables for all policy cpus.
+ //
+ // For platforms not using "operating-points-v2" bindings, we do this
+ // before updating policy cpus. Otherwise, we will end up creating
+ // duplicate OPPs for the CPUs.
+ //
+ // OPPs might be populated at runtime, so don't fail on an error here
+ // unless it is -EPROBE_DEFER.
+ let mut opp_table = match opp::Table::from_of_cpumask(dev, &mut mask) {
+ Ok(table) => table,
+ Err(e) => {
+ if e == EPROBE_DEFER {
+ return Err(e);
+ }
+
+ // The table might have been added dynamically.
+ opp::Table::from_dev(dev)?
+ }
+ };
+
+ // The OPP table must be initialized, statically or dynamically, by this point.
+ opp_table.opp_count()?;
+
+ // Set sharing cpus for fallback scenario.
+ if fallback {
+ mask.setall();
+ opp_table.set_sharing_cpus(&mut mask)?;
+ }
+
+ let mut transition_latency = opp_table.max_transition_latency_ns() as u32;
+ if transition_latency == 0 {
+ transition_latency = cpufreq::DEFAULT_TRANSITION_LATENCY_NS;
+ }
+
+ policy
+ .set_dvfs_possible_from_any_cpu(true)
+ .set_suspend_freq(opp_table.suspend_freq())
+ .set_transition_latency_ns(transition_latency);
+
+ let freq_table = opp_table.cpufreq_table()?;
+ // SAFETY: The `freq_table` is not dropped while it is getting used by the C code.
+ unsafe { policy.set_freq_table(&freq_table) };
+
+ // SAFETY: The returned `clk` is not dropped while it is getting used by the C code.
+ let clk = unsafe { policy.set_clk(dev, None)? };
+
+ mask.copy(policy.cpus());
+
+ Ok(Arc::new(
+ CPUFreqDTDevice {
+ opp_table,
+ freq_table,
+ _mask: mask,
+ _token: token,
+ _clk: clk,
+ },
+ GFP_KERNEL,
+ )?)
+ }
+
+ fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result {
+ Ok(())
+ }
+
+ fn online(_policy: &mut cpufreq::Policy) -> Result {
+ // We did light-weight tear down earlier, nothing to do here.
+ Ok(())
+ }
+
+ fn offline(_policy: &mut cpufreq::Policy) -> Result {
+ // Preserve policy->data and don't free resources on light-weight
+ // tear down.
+ Ok(())
+ }
+
+ fn suspend(policy: &mut cpufreq::Policy) -> Result {
+ policy.generic_suspend()
+ }
+
+ fn verify(data: &mut cpufreq::PolicyData) -> Result {
+ data.generic_verify()
+ }
+
+ fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result {
+ let Some(data) = policy.data::<Self::PData>() else {
+ return Err(ENOENT);
+ };
+
+ let freq = data.freq_table.freq(index)?;
+ data.opp_table.set_rate(freq)
+ }
+
+ fn get(policy: &mut cpufreq::Policy) -> Result<u32> {
+ policy.generic_get()
+ }
+
+ fn set_boost(_policy: &mut cpufreq::Policy, _state: i32) -> Result {
+ Ok(())
+ }
+
+ fn register_em(policy: &mut cpufreq::Policy) {
+ policy.register_em_opp()
+ }
+}
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <CPUFreqDTDriver as platform::Driver>::IdInfo,
+ [(of::DeviceId::new(c_str!("operating-points-v2")), ())]
+);
+
+impl platform::Driver for CPUFreqDTDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<Core>,
+ _id_info: Option<&Self::IdInfo>,
+ ) -> impl PinInit<Self, Error> {
+ cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
+ Ok(Self {})
+ }
+}
+
+module_platform_driver! {
+ type: CPUFreqDTDriver,
+ name: "cpufreq-dt",
+ authors: ["Viresh Kumar <viresh.kumar@linaro.org>"],
+ description: "Generic CPUFreq DT driver",
+ license: "GPL v2",
+}
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index c6bdfc308e99..9cef71528076 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -24,6 +24,7 @@ struct s3c64xx_dvfs {
unsigned int vddarm_max;
};
+#ifdef CONFIG_REGULATOR
static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[0] = { 1000000, 1150000 },
[1] = { 1050000, 1150000 },
@@ -31,6 +32,7 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[3] = { 1200000, 1350000 },
[4] = { 1300000, 1350000 },
};
+#endif
static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 0, 66000 },
@@ -51,15 +53,16 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
- struct s3c64xx_dvfs *dvfs;
- unsigned int old_freq, new_freq;
+ unsigned int new_freq = s3c64xx_freq_table[index].frequency;
int ret;
+#ifdef CONFIG_REGULATOR
+ struct s3c64xx_dvfs *dvfs;
+ unsigned int old_freq;
+
old_freq = clk_get_rate(policy->clk) / 1000;
- new_freq = s3c64xx_freq_table[index].frequency;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
-#ifdef CONFIG_REGULATOR
if (vddarm && new_freq > old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 76c888ed8d16..ba8a1c96427a 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -518,7 +518,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0) {
ret = -EINVAL;
- goto out_dmc1;
+ goto out;
}
/*
@@ -530,7 +530,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
pr_err("CPUFreq doesn't support this memory type\n");
ret = -EINVAL;
- goto out_dmc1;
+ goto out;
}
/* Find current refresh counter and frequency each DMC */
@@ -544,6 +544,8 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
return 0;
+out:
+ clk_put(dmc1_clk);
out_dmc1:
clk_put(dmc0_clk);
out_dmc0:
@@ -554,17 +556,15 @@ out_dmc0:
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(0);
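+ /* The reference from cpufreq_cpu_get() is dropped automatically on scope exit. */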
int ret;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
- cpufreq_cpu_put(policy);
if (ret < 0)
return NOTIFY_BAD;
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 330c8d6cf93c..b360f03a116f 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -21,7 +21,6 @@
#include <linux/io.h>
#include <asm/cpu_device_id.h>
-#include <asm/msr.h>
#define MMCR_BASE 0xfffef000 /* The default base address */
#define OFFS_CPUCTL 0x2 /* CPU Control Register */
@@ -92,7 +91,6 @@ static struct cpufreq_driver sc520_freq_driver = {
.target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
.name = "sc520_freq",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id sc520_ids[] = {
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index f34e6382a4c5..d2a110079f5f 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -15,7 +15,9 @@
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
@@ -26,18 +28,27 @@ struct scmi_data {
int nr_opp;
struct device *cpu_dev;
cpumask_var_t opp_shared_cpus;
+ struct notifier_block limit_notify_nb;
+ struct freq_qos_request limits_freq_req;
};
static struct scmi_protocol_handle *ph;
static const struct scmi_perf_proto_ops *perf_ops;
+static struct cpufreq_driver scmi_cpufreq_driver;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scmi_data *priv = policy->driver_data;
+ struct cpufreq_policy *policy;
+ struct scmi_data *priv;
unsigned long rate;
int ret;
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+
ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
if (ret)
return 0;
@@ -62,25 +73,45 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct scmi_data *priv = policy->driver_data;
+ unsigned long freq = target_freq;
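+ /* Widen to unsigned long before scaling kHz to Hz to avoid 32-bit overflow. */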
- if (!perf_ops->freq_set(ph, priv->domain_id,
- target_freq * 1000, true))
+ if (!perf_ops->freq_set(ph, priv->domain_id, freq * 1000, true))
return target_freq;
return 0;
}
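+/*
+ * The perf domain id is taken from the first cell of the CPU's "clocks"
+ * phandle when present, otherwise from the phandle behind the "perf"
+ * entry of the "power-domains" property.
+ */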
+static int scmi_cpu_domain_id(struct device *cpu_dev)
+{
+ struct device_node *np = cpu_dev->of_node;
+ struct of_phandle_args domain_id;
+ int index;
+
+ if (of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
+ &domain_id)) {
+ /* Find the corresponding index for power-domain "perf". */
+ index = of_property_match_string(np, "power-domain-names",
+ "perf");
+ if (index < 0)
+ return -EINVAL;
+
+ if (of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", index,
+ &domain_id))
+ return -EINVAL;
+ }
+
+ return domain_id.args[0];
+}
+
static int
-scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
+scmi_get_sharing_cpus(struct device *cpu_dev, int domain,
+ struct cpumask *cpumask)
{
- int cpu, domain, tdomain;
+ int cpu, tdomain;
struct device *tcpu_dev;
- domain = perf_ops->device_domain_id(cpu_dev);
- if (domain < 0)
- return domain;
-
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
@@ -88,7 +119,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
if (!tcpu_dev)
continue;
- tdomain = perf_ops->device_domain_id(tcpu_dev);
+ tdomain = scmi_cpu_domain_id(tcpu_dev);
if (tdomain == domain)
cpumask_set_cpu(cpu, cpumask);
}
@@ -104,7 +135,7 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
unsigned long Hz;
int ret, domain;
- domain = perf_ops->device_domain_id(cpu_dev);
+ domain = scmi_cpu_domain_id(cpu_dev);
if (domain < 0)
return domain;
@@ -124,13 +155,53 @@ scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
return 0;
}
+static int
+scmi_get_rate_limit(u32 domain, bool has_fast_switch)
+{
+ int ret, rate_limit;
+
+ if (has_fast_switch) {
+ /*
+ * Fast channels are used whenever available,
+ * so use their rate_limit value if populated.
+ */
+ ret = perf_ops->fast_switch_rate_limit(ph, domain,
+ &rate_limit);
+ if (!ret && rate_limit)
+ return rate_limit;
+ }
+
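+ /* Otherwise fall back to the domain's generic rate limit (in microseconds). */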
+ ret = perf_ops->rate_limit_get(ph, domain, &rate_limit);
+ if (ret)
+ return 0;
+
+ return rate_limit;
+}
+
+static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
+ struct scmi_perf_limits_report *limit_notify = data;
+ unsigned int limit_freq_khz;
+ int ret;
+
+ limit_freq_khz = limit_notify->range_max_freq / HZ_PER_KHZ;
+
+ ret = freq_qos_update_request(&priv->limits_freq_req, limit_freq_khz);
+ if (ret < 0)
+ pr_warn("failed to update freq constraint: %d\n", ret);
+
+ return NOTIFY_OK;
+}
+
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
- int ret, nr_opp;
+ int ret, nr_opp, domain;
unsigned int latency;
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
+ struct scmi_device *sdev = cpufreq_get_driver_data();
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -138,6 +209,10 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
+ domain = scmi_cpu_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -148,7 +223,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
}
/* Obtain CPUs that share SCMI performance controls */
- ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
+ ret = scmi_get_sharing_cpus(cpu_dev, domain, policy->cpus);
if (ret) {
dev_warn(cpu_dev, "failed to get sharing cpumask\n");
goto out_free_cpumask;
@@ -176,7 +251,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
*/
nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
if (nr_opp <= 0) {
- ret = perf_ops->device_opps_add(ph, cpu_dev);
+ ret = perf_ops->device_opps_add(ph, cpu_dev, domain);
if (ret) {
dev_warn(cpu_dev, "failed to add opps to the device\n");
goto out_free_cpumask;
@@ -209,7 +284,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
}
priv->cpu_dev = cpu_dev;
- priv->domain_id = perf_ops->device_domain_id(cpu_dev);
+ priv->domain_id = domain;
policy->driver_data = priv;
policy->freq_table = freq_table;
@@ -217,17 +292,39 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
/* SCMI allows DVFS request for any domain from any CPU */
policy->dvfs_possible_from_any_cpu = true;
- latency = perf_ops->transition_latency_get(ph, cpu_dev);
+ latency = perf_ops->transition_latency_get(ph, domain);
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible =
- perf_ops->fast_switch_possible(ph, cpu_dev);
+ perf_ops->fast_switch_possible(ph, domain);
+
+ policy->transition_delay_us =
+ scmi_get_rate_limit(domain, policy->fast_switch_possible);
+
+ ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to add qos limits request: %d\n", ret);
+ goto out_free_table;
+ }
+
+ priv->limit_notify_nb.notifier_call = scmi_limit_notify_cb;
+ ret = sdev->handle->notify_ops->event_notifier_register(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ if (ret)
+ dev_warn(&sdev->dev,
+ "failed to register for limits change notifier for domain %d\n",
+ priv->domain_id);
return 0;
+out_free_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
@@ -240,16 +337,20 @@ out_free_priv:
return ret;
}
-static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
+static void scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scmi_data *priv = policy->driver_data;
+ struct scmi_device *sdev = cpufreq_get_driver_data();
+ sdev->handle->notify_ops->event_notifier_unregister(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ freq_qos_remove_request(&priv->limits_freq_req);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
free_cpumask_var(priv->opp_shared_cpus);
kfree(priv);
-
- return 0;
}
static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
@@ -284,15 +385,58 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = cpufreq_generic_attr,
.target_index = scmi_cpufreq_set_target,
.fast_switch = scmi_cpufreq_fast_switch,
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
.register_em = scmi_cpufreq_register_em,
+ .set_boost = cpufreq_boost_set_sw,
};
+static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
+{
+ struct device_node *scmi_np = dev_of_node(scmi_dev);
+ struct device_node *cpu_np, *np;
+ struct device *cpu_dev;
+ int cpu, idx;
+
+ if (!scmi_np)
+ return false;
+
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ continue;
+
+ cpu_np = dev_of_node(cpu_dev);
+
+ np = of_parse_phandle(cpu_np, "clocks", 0);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+
+ idx = of_property_match_string(cpu_np, "power-domain-names", "perf");
+ np = of_parse_phandle(cpu_np, "power-domains", idx);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+ }
+
+ /*
+ * Older Broadcom STB chips had a "clocks" property for CPU node(s)
+ * that did not match the SCMI performance protocol node. If we get
+ * here, we have such an older Device Tree, so return true to
+ * preserve backwards compatibility.
+ */
+ if (of_machine_is_compatible("brcm,brcmstb"))
+ return true;
+
+ return false;
+}
+
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;
@@ -301,17 +445,22 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
handle = sdev->handle;
- if (!handle)
+ if (!handle || !scmi_dev_used_by_cpus(dev))
return -ENODEV;
+ scmi_cpufreq_driver.driver_data = sdev;
+
perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
if (IS_ERR(perf_ops))
return PTR_ERR(perf_ops);
#ifdef CONFIG_COMMON_CLK
/* dummy clock provider as needed by OPP if clocks property is used */
- if (of_property_present(dev->of_node, "#clock-cells"))
- devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+ if (of_property_present(dev->of_node, "#clock-cells")) {
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
+ }
#endif
ret = cpufreq_register_driver(&scmi_cpufreq_driver);
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index fd2c16821d54..e530345baddf 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -14,7 +14,7 @@
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scpi_protocol.h>
#include <linux/slab.h>
@@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops;
static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scpi_data *priv = policy->driver_data;
- unsigned long rate = clk_get_rate(priv->clk);
+ struct cpufreq_policy *policy;
+ struct scpi_data *priv;
+ unsigned long rate;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+ rate = clk_get_rate(priv->clk);
return rate / 1000;
}
@@ -39,8 +46,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- u64 rate = policy->freq_table[index].frequency * 1000;
+ unsigned long freq_khz = policy->freq_table[index].frequency;
struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = freq_khz * 1000;
int ret;
ret = clk_set_rate(priv->clk, rate);
@@ -48,7 +56,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
if (ret)
return ret;
- if (clk_get_rate(priv->clk) != rate)
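+ /* Compare in kHz so sub-kHz rounding by the clk framework is tolerated. */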
+ if (clk_get_rate(priv->clk) / 1000 != freq_khz)
return -EIO;
return 0;
@@ -64,7 +72,7 @@ scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
if (domain < 0)
return domain;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
@@ -149,7 +157,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
latency = scpi_ops->get_transition_latency(cpu_dev);
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
@@ -167,7 +175,7 @@ out_free_opp:
return ret;
}
-static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
+static void scpi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scpi_data *priv = policy->driver_data;
@@ -175,8 +183,6 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
kfree(priv);
-
- return 0;
}
static struct cpufreq_driver scpi_cpufreq_driver = {
@@ -185,7 +191,6 @@ static struct cpufreq_driver scpi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = cpufreq_generic_attr,
.get = scpi_cpufreq_get_rate,
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
@@ -208,11 +213,10 @@ static int scpi_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int scpi_cpufreq_remove(struct platform_device *pdev)
+static void scpi_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&scpi_cpufreq_driver);
scpi_ops = NULL;
- return 0;
}
static struct platform_driver scpi_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index b8704232c27b..642ddb9ea217 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -89,11 +89,9 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
- struct cpufreq_frequency_table *freq_table;
- freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
+ if (policy->freq_table)
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
@@ -135,14 +133,12 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static void sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
clk_put(cpuclk);
-
- return 0;
}
static struct cpufreq_driver sh_cpufreq_driver = {
@@ -153,7 +149,6 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .attr = cpufreq_generic_attr,
};
static int __init sh_cpufreq_module_init(void)
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 2783d3d55fce..15899dd77c08 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -296,10 +296,9 @@ static int us2e_freq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
+static void us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
us2e_freq_target(policy, 0);
- return 0;
}
static struct cpufreq_driver cpufreq_us2e_driver = {
@@ -324,7 +323,7 @@ static int __init us2e_freq_init(void)
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
- us2e_freq_table = kzalloc(NR_CPUS * sizeof(*us2e_freq_table),
+ us2e_freq_table = kcalloc(NR_CPUS, sizeof(*us2e_freq_table),
GFP_KERNEL);
if (!us2e_freq_table)
return -ENOMEM;
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index 6c3657679a88..de50a2f3b124 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -140,10 +140,9 @@ static int us3_freq_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+static void us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
us3_freq_target(policy, 0);
- return 0;
}
static struct cpufreq_driver cpufreq_us3_driver = {
@@ -172,7 +171,7 @@ static int __init us3_freq_init(void)
impl == CHEETAH_PLUS_IMPL ||
impl == JAGUAR_IMPL ||
impl == PANTHER_IMPL)) {
- us3_freq_table = kzalloc(NR_CPUS * sizeof(*us3_freq_table),
+ us3_freq_table = kcalloc(NR_CPUS, sizeof(*us3_freq_table),
GFP_KERNEL);
if (!us3_freq_table)
return -ENOMEM;
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 78b875db6b66..2a1550e1aa21 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -165,16 +165,14 @@ static struct cpufreq_driver spear_cpufreq_driver = {
.target_index = spear_cpufreq_target,
.get = cpufreq_generic_get,
.init = spear_cpufreq_init,
- .attr = cpufreq_generic_attr,
};
static int spear_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- const struct property *prop;
struct cpufreq_frequency_table *freq_tbl;
- const __be32 *val;
- int cnt, i, ret;
+ u32 val;
+ int cnt, ret, i = 0;
np = of_cpu_device_node_get(0);
if (!np) {
@@ -184,28 +182,25 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-latency",
&spear_cpufreq.transition_latency))
- spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
+ spear_cpufreq.transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
- prop = of_find_property(np, "cpufreq_tbl", NULL);
- if (!prop || !prop->value) {
+ cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
+ if (cnt <= 0) {
pr_err("Invalid cpufreq_tbl\n");
ret = -ENODEV;
goto out_put_node;
}
- cnt = prop->length / sizeof(u32);
- val = prop->value;
-
freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL);
if (!freq_tbl) {
ret = -ENOMEM;
goto out_put_node;
}
- for (i = 0; i < cnt; i++)
- freq_tbl[i].frequency = be32_to_cpup(val++);
+ of_property_for_each_u32(np, "cpufreq_tbl", val)
+ freq_tbl[i++].frequency = val;
- freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+ freq_tbl[cnt].frequency = CPUFREQ_TABLE_END;
spear_cpufreq.freq_tbl = freq_tbl;
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 75b10ecdb60f..3e6e85a92212 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -400,16 +400,12 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int centrino_cpu_exit(struct cpufreq_policy *policy)
+static void centrino_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- if (!per_cpu(centrino_model, cpu))
- return -ENODEV;
-
- per_cpu(centrino_model, cpu) = NULL;
-
- return 0;
+ if (per_cpu(centrino_model, cpu))
+ per_cpu(centrino_model, cpu) = NULL;
}
/**
@@ -511,7 +507,6 @@ static struct cpufreq_driver centrino_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = centrino_target,
.get = get_cur_freq,
- .attr = cpufreq_generic_attr,
};
/*
@@ -520,10 +515,10 @@ static struct cpufreq_driver centrino_driver = {
* or ASCII model IDs.
*/
static const struct x86_cpu_id centrino_ids[] = {
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 9, X86_FEATURE_EST, NULL),
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 13, X86_FEATURE_EST, NULL),
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 3, X86_FEATURE_EST, NULL),
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 4, X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM( 6, 9), X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM( 6, 13), X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM(15, 3), X86_FEATURE_EST, NULL),
+ X86_MATCH_VFM_FEATURE(IFM(15, 4), X86_FEATURE_EST, NULL),
{}
};
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index f2076d72bf39..262cfbde9ca7 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -315,7 +315,6 @@ static struct cpufreq_driver speedstep_driver = {
.target_index = speedstep_target,
.init = speedstep_cpu_init,
.get = speedstep_get,
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 0b66df4ed513..f8b42e981635 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -378,16 +378,16 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
* DETECT SPEEDSTEP SPEEDS *
*********************************************************************/
-unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state))
+int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state))
{
unsigned int prev_speed;
- unsigned int ret = 0;
unsigned long flags;
ktime_t tv1, tv2;
+ int ret = 0;
if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
return -EINVAL;
diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h
index dc762ea786be..48329647d4c4 100644
--- a/drivers/cpufreq/speedstep-lib.h
+++ b/drivers/cpufreq/speedstep-lib.h
@@ -41,8 +41,8 @@ extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
* SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
-extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state));
+extern int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state));
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 0ce9d4b6dfcc..39265884c3f1 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -295,7 +295,6 @@ static struct cpufreq_driver speedstep_driver = {
.init = speedstep_cpu_init,
.get = speedstep_get,
.resume = speedstep_resume,
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index 1a63aeea8711..b15b3142b5fe 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -13,12 +13,12 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#define VERSION_ELEMENTS 3
-#define MAX_PCODE_NAME_LEN 7
+#define MAX_PCODE_NAME_LEN 16
#define VERSION_SHIFT 28
#define HW_INFO_INDEX 1
@@ -267,7 +267,7 @@ static int __init sti_cpufreq_init(void)
goto skip_voltage_scaling;
}
- if (!of_get_property(ddata.cpu->of_node, "operating-points-v2", NULL)) {
+ if (!of_property_present(ddata.cpu->of_node, "operating-points-v2")) {
dev_err(ddata.cpu, "OPP-v2 not supported\n");
goto skip_voltage_scaling;
}
@@ -293,6 +293,7 @@ module_init(sti_cpufreq_init);
static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = {
{ .compatible = "st,stih407" },
{ .compatible = "st,stih410" },
+ { .compatible = "st,stih418" },
{ },
};
MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match);
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 4321d7bbe769..4fffc8e83692 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
@@ -18,25 +19,183 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
-#define MAX_NAME_LEN 7
-
#define NVMEM_MASK 0x7
#define NVMEM_SHIFT 5
+#define SUN50I_A100_NVMEM_MASK 0xf
+#define SUN50I_A100_NVMEM_SHIFT 12
+
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
+struct sunxi_cpufreq_data {
+ u32 (*efuse_xlate)(u32 speedbin);
+};
+
+static u32 sun50i_h6_efuse_xlate(u32 speedbin)
+{
+ u32 efuse_value;
+
+ efuse_value = (speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
+
+ /*
+ * We treat unexpected efuse values as if the SoC was from
+ * the slowest bin. Expected efuse values are 1-3, slowest
+ * to fastest.
+ */
+ if (efuse_value >= 1 && efuse_value <= 3)
+ return efuse_value - 1;
+ else
+ return 0;
+}
+
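+/*
+ * The A100 speed bin field appears to be one-hot: 0b100 selects bin 2,
+ * 0b010 selects bin 1, and any other value falls back to the slowest bin 0.
+ */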
+static u32 sun50i_a100_efuse_xlate(u32 speedbin)
+{
+ u32 efuse_value;
+
+ efuse_value = (speedbin >> SUN50I_A100_NVMEM_SHIFT) &
+ SUN50I_A100_NVMEM_MASK;
+
+ switch (efuse_value) {
+ case 0b100:
+ return 2;
+ case 0b010:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int get_soc_id_revision(void)
+{
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+ return arm_smccc_get_soc_id_revision();
+#else
+ return SMCCC_RET_NOT_SUPPORTED;
+#endif
+}
+
+/*
+ * Judging by the OPP tables in the vendor BSP, the quality order of the
+ * returned speedbin index is 4 -> 0/2 -> 3 -> 1, from worst to best.
+ * 0 and 2 seem identical from the OPP tables' point of view.
+ */
+static u32 sun50i_h616_efuse_xlate(u32 speedbin)
+{
+ int ver_bits = get_soc_id_revision();
+ u32 value = 0;
+
+ switch (speedbin & 0xffff) {
+ case 0x2000:
+ value = 0;
+ break;
+ case 0x2400:
+ case 0x7400:
+ case 0x2c00:
+ case 0x7c00:
+ if (ver_bits != SMCCC_RET_NOT_SUPPORTED && ver_bits <= 1) {
+ /* ic version A/B */
+ value = 1;
+ } else {
+ /* ic version C and later version */
+ value = 2;
+ }
+ break;
+ case 0x5000:
+ case 0x5400:
+ case 0x6000:
+ value = 3;
+ break;
+ case 0x5c00:
+ value = 4;
+ break;
+ case 0x5d00:
+ value = 0;
+ break;
+ case 0x6c00:
+ value = 5;
+ break;
+ default:
+ pr_warn("sun50i-cpufreq-nvmem: unknown speed bin 0x%x, using default bin 0\n",
+ speedbin & 0xffff);
+ value = 0;
+ break;
+ }
+
+ return value;
+}
+
+static struct sunxi_cpufreq_data sun50i_h6_cpufreq_data = {
+ .efuse_xlate = sun50i_h6_efuse_xlate,
+};
+
+static struct sunxi_cpufreq_data sun50i_a100_cpufreq_data = {
+ .efuse_xlate = sun50i_a100_efuse_xlate,
+};
+
+static struct sunxi_cpufreq_data sun50i_h616_cpufreq_data = {
+ .efuse_xlate = sun50i_h616_efuse_xlate,
+};
+
+static const struct of_device_id cpu_opp_match_list[] = {
+ { .compatible = "allwinner,sun50i-h6-operating-points",
+ .data = &sun50i_h6_cpufreq_data,
+ },
+ { .compatible = "allwinner,sun50i-a100-operating-points",
+ .data = &sun50i_a100_cpufreq_data,
+ },
+ { .compatible = "allwinner,sun50i-h616-operating-points",
+ .data = &sun50i_h616_cpufreq_data,
+ },
+ {}
+};
+
+/**
+ * dt_has_supported_hw() - Check if any OPPs use opp-supported-hw
+ *
+ * If we ask the OPP core to use the opp-supported-hw feature, it will
+ * ignore every OPP node without that DT property. If none of the OPPs
+ * have it, the driver will fail probing due to the lack of OPPs.
+ *
+ * Returns true if we have at least one OPP with the opp-supported-hw property.
+ */
+static bool dt_has_supported_hw(void)
+{
+ bool has_opp_supported_hw = false;
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return false;
+
+ struct device_node *np __free(device_node) =
+ dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!np)
+ return false;
+
+ for_each_child_of_node_scoped(np, opp) {
+ if (of_property_present(opp, "opp-supported-hw")) {
+ has_opp_supported_hw = true;
+ break;
+ }
+ }
+
+ return has_opp_supported_hw;
+}
+
/**
* sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
- * @versions: Set to the value parsed from efuse
*
- * Returns 0 if success.
+ * Returns a non-negative speed bin index on success, or a negative
+ * error value otherwise.
*/
-static int sun50i_cpufreq_get_efuse(u32 *versions)
+static int sun50i_cpufreq_get_efuse(void)
{
+ const struct sunxi_cpufreq_data *opp_data;
struct nvmem_cell *speedbin_nvmem;
- struct device_node *np;
+ const struct of_device_id *match;
struct device *cpu_dev;
- u32 *speedbin, efuse_value;
+ void *speedbin_ptr;
+ u32 speedbin = 0;
size_t len;
int ret;
@@ -44,50 +203,45 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
if (!cpu_dev)
return -ENODEV;
- np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ struct device_node *np __free(device_node) =
+ dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np,
- "allwinner,sun50i-h6-operating-points");
- if (!ret) {
- of_node_put(np);
+ match = of_match_node(cpu_opp_match_list, np);
+ if (!match)
return -ENOENT;
- }
+
+ opp_data = match->data;
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
- of_node_put(np);
if (IS_ERR(speedbin_nvmem))
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
- speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ speedbin_ptr = nvmem_cell_read(speedbin_nvmem, &len);
nvmem_cell_put(speedbin_nvmem);
- if (IS_ERR(speedbin))
- return PTR_ERR(speedbin);
+ if (IS_ERR(speedbin_ptr))
+ return PTR_ERR(speedbin_ptr);
- efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
+ if (len <= 4)
+ memcpy(&speedbin, speedbin_ptr, len);
+ speedbin = le32_to_cpu(speedbin);
- /*
- * We treat unexpected efuse values as if the SoC was from
- * the slowest bin. Expected efuse values are 1-3, slowest
- * to fastest.
- */
- if (efuse_value >= 1 && efuse_value <= 3)
- *versions = efuse_value - 1;
- else
- *versions = 0;
+ ret = opp_data->efuse_xlate(speedbin);
+
+ kfree(speedbin_ptr);
- kfree(speedbin);
- return 0;
+ return ret;
};
static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
{
int *opp_tokens;
- char name[MAX_NAME_LEN];
- unsigned int cpu;
- u32 speed = 0;
+ char name[] = "speedXXXXXXXXXXX"; /* Integers can take 11 chars max */
+ unsigned int cpu, supported_hw;
+ struct dev_pm_opp_config config = {};
+ int speed;
int ret;
opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens),
@@ -95,15 +249,26 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
if (!opp_tokens)
return -ENOMEM;
- ret = sun50i_cpufreq_get_efuse(&speed);
- if (ret) {
+ speed = sun50i_cpufreq_get_efuse();
+ if (speed < 0) {
kfree(opp_tokens);
- return ret;
+ return speed;
+ }
+
+ /*
+ * We need at least one OPP with the "opp-supported-hw" property,
+ * or else the upper layers will ignore every OPP and will bail out.
+ */
+ if (dt_has_supported_hw()) {
+ supported_hw = 1U << speed;
+ config.supported_hw = &supported_hw;
+ config.supported_hw_count = 1;
}
- snprintf(name, MAX_NAME_LEN, "speed%d", speed);
+ snprintf(name, sizeof(name), "speed%d", speed);
+ config.prop_name = name;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
@@ -111,12 +276,11 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
goto free_opp;
}
- opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
- if (opp_tokens[cpu] < 0) {
- ret = opp_tokens[cpu];
- pr_err("Failed to set prop name\n");
+ ret = dev_pm_opp_set_config(cpu_dev, &config);
+ if (ret < 0)
goto free_opp;
- }
+
+ opp_tokens[cpu] = ret;
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
@@ -130,26 +294,24 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
pr_err("Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ for_each_present_cpu(cpu)
+ dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
return ret;
}
-static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
+static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
{
int *opp_tokens = platform_get_drvdata(pdev);
unsigned int cpu;
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu)
- dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ for_each_present_cpu(cpu)
+ dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
-
- return 0;
}
static struct platform_driver sun50i_cpufreq_driver = {
@@ -162,22 +324,14 @@ static struct platform_driver sun50i_cpufreq_driver = {
static const struct of_device_id sun50i_cpufreq_match_list[] = {
{ .compatible = "allwinner,sun50i-h6" },
+ { .compatible = "allwinner,sun50i-a100" },
+ { .compatible = "allwinner,sun50i-h616" },
+ { .compatible = "allwinner,sun50i-h618" },
+ { .compatible = "allwinner,sun50i-h700" },
{}
};
MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
-static const struct of_device_id *sun50i_cpufreq_match_node(void)
-{
- const struct of_device_id *match;
- struct device_node *np;
-
- np = of_find_node_by_path("/");
- match = of_match_node(sun50i_cpufreq_match_list, np);
- of_node_put(np);
-
- return match;
-}
-
/*
* Since the driver depends on nvmem drivers, which may return EPROBE_DEFER,
* all the real activity is done in the probe, which may be deferred as well.
@@ -185,11 +339,9 @@ static const struct of_device_id *sun50i_cpufreq_match_node(void)
*/
static int __init sun50i_cpufreq_init(void)
{
- const struct of_device_id *match;
int ret;
- match = sun50i_cpufreq_match_node();
- if (!match)
+ if (!of_machine_device_match(sun50i_cpufreq_match_list))
return -ENODEV;
ret = platform_driver_register(&sun50i_cpufreq_driver);
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index aae951d4e77c..f8a76bbecef9 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -16,6 +16,10 @@
#include <linux/pm_opp.h>
#include <linux/types.h>
+#include "cpufreq-dt.h"
+
+static struct platform_device *tegra124_cpufreq_pdev;
+
struct tegra124_cpufreq_priv {
struct clk *cpu_clk;
struct clk *pllp_clk;
@@ -52,12 +56,14 @@ out:
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
struct tegra124_cpufreq_priv *priv;
- struct device_node *np;
struct device *cpu_dev;
- struct platform_device_info cpufreq_dt_devinfo = {};
int ret;
+ if (!np)
+ return -ENODEV;
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -66,15 +72,9 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (!cpu_dev)
return -ENODEV;
- np = of_cpu_device_node_get(0);
- if (!np)
- return -ENODEV;
-
priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
- if (IS_ERR(priv->cpu_clk)) {
- ret = PTR_ERR(priv->cpu_clk);
- goto out_put_np;
- }
+ if (IS_ERR(priv->cpu_clk))
+ return PTR_ERR(priv->cpu_clk);
priv->dfll_clk = of_clk_get_by_name(np, "dfll");
if (IS_ERR(priv->dfll_clk)) {
@@ -98,11 +98,7 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (ret)
goto out_put_pllp_clk;
- cpufreq_dt_devinfo.name = "cpufreq-dt";
- cpufreq_dt_devinfo.parent = &pdev->dev;
-
- priv->cpufreq_dt_pdev =
- platform_device_register_full(&cpufreq_dt_devinfo);
+ priv->cpufreq_dt_pdev = cpufreq_dt_pdev_register(&pdev->dev);
if (IS_ERR(priv->cpufreq_dt_pdev)) {
ret = PTR_ERR(priv->cpufreq_dt_pdev);
goto out_put_pllp_clk;
@@ -110,8 +106,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- of_node_put(np);
-
return 0;
out_put_pllp_clk:
@@ -122,8 +116,6 @@ out_put_dfll_clk:
clk_put(priv->dfll_clk);
out_put_cpu_clk:
clk_put(priv->cpu_clk);
-out_put_np:
- of_node_put(np);
return ret;
}
@@ -180,6 +172,21 @@ disable_cpufreq:
return err;
}
+static void tegra124_cpufreq_remove(struct platform_device *pdev)
+{
+ struct tegra124_cpufreq_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ if (!IS_ERR(priv->cpufreq_dt_pdev)) {
+ platform_device_unregister(priv->cpufreq_dt_pdev);
+ priv->cpufreq_dt_pdev = ERR_PTR(-ENODEV);
+ }
+
+ clk_put(priv->pllp_clk);
+ clk_put(priv->pllx_clk);
+ clk_put(priv->dfll_clk);
+ clk_put(priv->cpu_clk);
+}
+
static const struct dev_pm_ops tegra124_cpufreq_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra124_cpufreq_suspend,
tegra124_cpufreq_resume)
@@ -189,15 +196,16 @@ static struct platform_driver tegra124_cpufreq_platdrv = {
.driver.name = "cpufreq-tegra124",
.driver.pm = &tegra124_cpufreq_pm_ops,
.probe = tegra124_cpufreq_probe,
+ .remove = tegra124_cpufreq_remove,
};
static int __init tegra_cpufreq_init(void)
{
int ret;
- struct platform_device *pdev;
- if (!(of_machine_is_compatible("nvidia,tegra124") ||
- of_machine_is_compatible("nvidia,tegra210")))
+ if (!(of_machine_is_compatible("nvidia,tegra114") ||
+ of_machine_is_compatible("nvidia,tegra124") ||
+ of_machine_is_compatible("nvidia,tegra210")))
return -ENODEV;
/*
@@ -208,15 +216,25 @@ static int __init tegra_cpufreq_init(void)
if (ret)
return ret;
- pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
- if (IS_ERR(pdev)) {
+ tegra124_cpufreq_pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
+ if (IS_ERR(tegra124_cpufreq_pdev)) {
platform_driver_unregister(&tegra124_cpufreq_platdrv);
- return PTR_ERR(pdev);
+ return PTR_ERR(tegra124_cpufreq_pdev);
}
return 0;
}
module_init(tegra_cpufreq_init);
+static void __exit tegra_cpufreq_module_exit(void)
+{
+ if (!IS_ERR_OR_NULL(tegra124_cpufreq_pdev))
+ platform_device_unregister(tegra124_cpufreq_pdev);
+
+ platform_driver_unregister(&tegra124_cpufreq_platdrv);
+}
+module_exit(tegra_cpufreq_module_exit);
+
MODULE_AUTHOR("Tuomas Tynkkynen <ttynkkynen@nvidia.com>");
MODULE_DESCRIPTION("cpufreq driver for NVIDIA Tegra124");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index f98f53bf1011..34ed943c5f34 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/units.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
@@ -58,7 +59,7 @@ static const struct tegra186_cpufreq_cpu tegra186_cpus[] = {
};
struct tegra186_cpufreq_cluster {
- struct cpufreq_frequency_table *table;
+ struct cpufreq_frequency_table *bpmp_lut;
u32 ref_clk_khz;
u32 div;
};
@@ -66,18 +67,142 @@ struct tegra186_cpufreq_cluster {
struct tegra186_cpufreq_data {
void __iomem *regs;
const struct tegra186_cpufreq_cpu *cpus;
+ bool icc_dram_bw_scaling;
struct tegra186_cpufreq_cluster clusters[];
};
+static int tegra_cpufreq_set_bw(struct cpufreq_policy *policy, unsigned long freq_khz)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ struct device *dev;
+ int ret;
+
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_exact(dev, freq_khz * HZ_PER_KHZ, true);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret)
+ data->icc_dram_bw_scaling = false;
+
+ return ret;
+}
+
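+/*
+ * Build the policy's frequency table by cross-validating the BPMP-FW
+ * LUT against the OPPs declared in DT: only frequencies present in
+ * both are enabled and copied into the table used by the cpufreq core.
+ */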
+static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *bpmp_lut,
+ struct cpufreq_frequency_table **opp_table)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ struct cpufreq_frequency_table *freq_table = NULL;
+ struct cpufreq_frequency_table *pos;
+ struct device *cpu_dev;
+ unsigned long rate;
+ int ret, max_opps;
+ int j = 0;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
+ return -ENODEV;
+ }
+
+ /* Initialize the OPP table from the operating-points-v2 property in DT */
+ ret = dev_pm_opp_of_add_table_indexed(cpu_dev, 0);
+ if (ret) {
+ dev_err(cpu_dev, "Invalid or empty opp table in device tree\n");
+ data->icc_dram_bw_scaling = false;
+ return ret;
+ }
+
+ max_opps = dev_pm_opp_get_opp_count(cpu_dev);
+ if (max_opps <= 0) {
+ dev_err(cpu_dev, "Failed to add OPPs\n");
+ return max_opps;
+ }
+
+ /* Disable all OPPs; valid ones are re-enabled against the LUT below */
+ for (rate = 0; ; rate++) {
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_disable(cpu_dev, rate);
+ }
+
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
+ if (!freq_table)
+ return -ENOMEM;
+
+ /*
+ * Cross-check the frequencies from the BPMP-FW LUT against the OPPs
+ * present in DT. Enable only those DT OPPs that are also present in
+ * the LUT.
+ */
+ cpufreq_for_each_valid_entry(pos, bpmp_lut) {
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_exact(cpu_dev, pos->frequency * HZ_PER_KHZ, false);
+ if (IS_ERR(opp))
+ continue;
+
+ ret = dev_pm_opp_enable(cpu_dev, pos->frequency * HZ_PER_KHZ);
+ if (ret < 0)
+ return ret;
+
+ freq_table[j].driver_data = pos->driver_data;
+ freq_table[j].frequency = pos->frequency;
+ j++;
+ }
+
+ freq_table[j].driver_data = pos->driver_data;
+ freq_table[j].frequency = CPUFREQ_TABLE_END;
+
+ *opp_table = &freq_table[0];
+
+ dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+
+ /* Prime interconnect data */
+ tegra_cpufreq_set_bw(policy, freq_table[j - 1].frequency);
+
+ return ret;
+}
+
static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_frequency_table *bpmp_lut;
+ u32 cpu;
+ int ret;
- policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
+ /* set same policy for all cpus in a cluster */
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == cluster)
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+
+ bpmp_lut = data->clusters[cluster].bpmp_lut;
+
+ if (data->icc_dram_bw_scaling) {
+ ret = tegra_cpufreq_init_cpufreq_table(policy, bpmp_lut, &freq_table);
+ if (!ret) {
+ policy->freq_table = freq_table;
+ return 0;
+ }
+ }
+
+ data->icc_dram_bw_scaling = false;
+ policy->freq_table = bpmp_lut;
+ pr_info("OPP tables missing from DT, EMC frequency scaling disabled\n");
+
return 0;
}
@@ -86,23 +211,30 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_frequency_table *tbl = policy->freq_table + index;
- unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset;
+ unsigned int edvd_offset;
u32 edvd_val = tbl->driver_data;
+ u32 cpu;
+
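+ /* The cluster shares one policy; program each core's EDVD register. */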
+ for_each_cpu(cpu, policy->cpus) {
+ edvd_offset = data->cpus[cpu].edvd_offset;
+ writel(edvd_val, data->regs + edvd_offset);
+ }
+
+ if (data->icc_dram_bw_scaling)
+ tegra_cpufreq_set_bw(policy, tbl->frequency);
- writel(edvd_val, data->regs + edvd_offset);
return 0;
}
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct tegra186_cpufreq_cluster *cluster;
- struct cpufreq_policy *policy;
unsigned int edvd_offset, cluster_id;
u32 ndiv;
- policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -110,7 +242,6 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK;
cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
cluster = &data->clusters[cluster_id];
- cpufreq_cpu_put(policy);
return (cluster->ref_clk_khz * ndiv) / cluster->div;
}
@@ -123,18 +254,18 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra186_cpufreq_set_target,
.init = tegra186_cpufreq_init,
- .attr = cpufreq_generic_attr,
};
-static struct cpufreq_frequency_table *init_vhint_table(
+static struct cpufreq_frequency_table *tegra_cpufreq_bpmp_read_lut(
struct platform_device *pdev, struct tegra_bpmp *bpmp,
- struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id)
+ struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id,
+ int *num_rates)
{
struct cpufreq_frequency_table *table;
struct mrq_cpu_vhint_request req;
struct tegra_bpmp_message msg;
struct cpu_vhint_data *data;
- int err, i, j, num_rates = 0;
+ int err, i, j;
dma_addr_t phys;
void *virt;
@@ -164,6 +295,7 @@ static struct cpufreq_frequency_table *init_vhint_table(
goto free;
}
+ *num_rates = 0;
for (i = data->vfloor; i <= data->vceil; i++) {
u16 ndiv = data->ndiv[i];
@@ -174,10 +306,10 @@ static struct cpufreq_frequency_table *init_vhint_table(
if (i > 0 && ndiv == data->ndiv[i - 1])
continue;
- num_rates++;
+ (*num_rates)++;
}
- table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table),
+ table = devm_kcalloc(&pdev->dev, *num_rates + 1, sizeof(*table),
GFP_KERNEL);
if (!table) {
table = ERR_PTR(-ENOMEM);
@@ -219,7 +351,10 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
- unsigned int i = 0, err;
+ struct device *cpu_dev;
+ unsigned int i = 0, err, edvd_offset;
+ int num_rates = 0;
+ u32 edvd_val, cpu;
data = devm_kzalloc(&pdev->dev,
struct_size(data, clusters, TEGRA186_NUM_CLUSTERS),
@@ -242,15 +377,39 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) {
struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
- cluster->table = init_vhint_table(pdev, bpmp, cluster, i);
- if (IS_ERR(cluster->table)) {
- err = PTR_ERR(cluster->table);
+ cluster->bpmp_lut = tegra_cpufreq_bpmp_read_lut(pdev, bpmp, cluster, i, &num_rates);
+ if (IS_ERR(cluster->bpmp_lut)) {
+ err = PTR_ERR(cluster->bpmp_lut);
+ goto put_bpmp;
+ } else if (!num_rates) {
+ err = -EINVAL;
goto put_bpmp;
}
+
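+ /* Program every core in this cluster with the last LUT entry's EDVD value. */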
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == i) {
+ edvd_val = cluster->bpmp_lut[num_rates - 1].driver_data;
+ edvd_offset = data->cpus[cpu].edvd_offset;
+ writel(edvd_val, data->regs + edvd_offset);
+ }
+ }
}
tegra186_cpufreq_driver.driver_data = data;
+ /* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ err = -EPROBE_DEFER;
+ goto put_bpmp;
+ }
+
+ if (dev_pm_opp_of_get_opp_desc_node(cpu_dev)) {
+ err = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
+ if (!err)
+ data->icc_dram_bw_scaling = true;
+ }
+
err = cpufreq_register_driver(&tegra186_cpufreq_driver);
put_bpmp:
@@ -259,11 +418,9 @@ put_bpmp:
return err;
}
-static int tegra186_cpufreq_remove(struct platform_device *pdev)
+static void tegra186_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&tegra186_cpufreq_driver);
-
- return 0;
}
static const struct of_device_id tegra186_cpufreq_of_match[] = {
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 36dad5ea5947..695599e1001f 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -5,7 +5,6 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
-#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -21,10 +20,11 @@
#define KHZ 1000
#define REF_CLK_MHZ 408 /* 408 MHz */
-#define US_DELAY 500
#define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ)
#define MAX_CNT ~0U
+#define MAX_DELTA_KHZ 115200
+
#define NDIV_MASK 0x1FF
#define CORE_OFFSET(cpu) (cpu * 8)
@@ -39,6 +39,12 @@
/* cpufreq transition latency */
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
+struct tegra_cpu_data {
+ u32 cpuid;
+ u32 clusterid;
+ void __iomem *freq_core_reg;
+};
+
struct tegra_cpu_ctr {
u32 cpu;
u32 coreclk_cnt, last_coreclk_cnt;
@@ -62,6 +68,7 @@ struct tegra_cpufreq_soc {
int maxcpus_per_cluster;
unsigned int num_clusters;
phys_addr_t actmon_cntr_base;
+ u32 refclk_delta_min;
};
struct tegra194_cpufreq_data {
@@ -69,6 +76,7 @@ struct tegra194_cpufreq_data {
struct cpufreq_frequency_table **bpmp_luts;
const struct tegra_cpufreq_soc *soc;
bool icc_dram_bw_scaling;
+ struct tegra_cpu_data *cpu_data;
};
static struct workqueue_struct *read_counters_wq;
@@ -116,14 +124,8 @@ static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
- void __iomem *freq_core_reg;
- u64 mpidr_id;
-
- /* use physical id to get address of per core frequency register */
- mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
- freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
- *ndiv = readl(freq_core_reg) & NDIV_MASK;
+ *ndiv = readl(data->cpu_data[cpu].freq_core_reg) & NDIV_MASK;
return 0;
}
@@ -131,19 +133,10 @@ static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
- void __iomem *freq_core_reg;
- u32 cpu, cpuid, clusterid;
- u64 mpidr_id;
-
- for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) {
- data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
-
- /* use physical id to get address of per core frequency register */
- mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
- freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
+ u32 cpu;
- writel(ndiv, freq_core_reg);
- }
+ for_each_cpu(cpu, policy->cpus)
+ writel(ndiv, data->cpu_data[cpu].freq_core_reg);
}
/*
@@ -157,19 +150,35 @@ static void tegra234_read_counters(struct tegra_cpu_ctr *c)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
void __iomem *actmon_reg;
- u32 cpuid, clusterid;
+ u32 delta_refcnt;
+ int cnt = 0;
u64 val;
- data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid);
- actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid);
+ actmon_reg = CORE_ACTMON_CNTR_REG(data, data->cpu_data[c->cpu].clusterid,
+ data->cpu_data[c->cpu].cpuid);
val = readq(actmon_reg);
c->last_refclk_cnt = upper_32_bits(val);
c->last_coreclk_cnt = lower_32_bits(val);
- udelay(US_DELAY);
- val = readq(actmon_reg);
- c->refclk_cnt = upper_32_bits(val);
- c->coreclk_cnt = lower_32_bits(val);
+
+ /*
+ * The sampling window is based on the minimum number of reference
+ * clock cycles that is known to give a stable CPU frequency value.
+ */
+ do {
+ val = readq(actmon_reg);
+ c->refclk_cnt = upper_32_bits(val);
+ c->coreclk_cnt = lower_32_bits(val);
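+ /* Handle 32-bit counter wrap-around when computing the refclk delta. */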
+ if (c->refclk_cnt < c->last_refclk_cnt)
+ delta_refcnt = c->refclk_cnt + (MAX_CNT - c->last_refclk_cnt);
+ else
+ delta_refcnt = c->refclk_cnt - c->last_refclk_cnt;
+ if (++cnt >= 0xFFFF) {
+ pr_warn("cpufreq: problem with refclk on cpu:%d, delta_refcnt:%u, cnt:%d\n",
+ c->cpu, delta_refcnt, cnt);
+ break;
+ }
+ } while (delta_refcnt < data->soc->refclk_delta_min);
}
static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
@@ -184,6 +193,7 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
.actmon_cntr_base = 0x9000,
.maxcpus_per_cluster = 4,
.num_clusters = 3,
+ .refclk_delta_min = 16000,
};
static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
@@ -191,6 +201,7 @@ static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
.actmon_cntr_base = 0x4000,
.maxcpus_per_cluster = 8,
.num_clusters = 1,
+ .refclk_delta_min = 16000,
};
static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
@@ -231,15 +242,33 @@ static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response
static void tegra194_read_counters(struct tegra_cpu_ctr *c)
{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ u32 delta_refcnt;
+ int cnt = 0;
u64 val;
val = read_freq_feedback();
c->last_refclk_cnt = lower_32_bits(val);
c->last_coreclk_cnt = upper_32_bits(val);
- udelay(US_DELAY);
- val = read_freq_feedback();
- c->refclk_cnt = lower_32_bits(val);
- c->coreclk_cnt = upper_32_bits(val);
+
+ /*
+ * The sampling window is based on the minimum number of reference
+ * clock cycles that is known to give a stable CPU frequency value.
+ */
+ do {
+ val = read_freq_feedback();
+ c->refclk_cnt = lower_32_bits(val);
+ c->coreclk_cnt = upper_32_bits(val);
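+ /* Handle 32-bit counter wrap-around when computing the refclk delta. */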
+ if (c->refclk_cnt < c->last_refclk_cnt)
+ delta_refcnt = c->refclk_cnt + (MAX_CNT - c->last_refclk_cnt);
+ else
+ delta_refcnt = c->refclk_cnt - c->last_refclk_cnt;
+ if (++cnt >= 0xFFFF) {
+ pr_warn("cpufreq: problem with refclk on cpu:%d, delta_refcnt:%u, cnt:%d\n",
+ c->cpu, delta_refcnt, cnt);
+ break;
+ }
+ } while (delta_refcnt < data->soc->refclk_delta_min);
}
static void tegra_read_counters(struct work_struct *work)
@@ -297,9 +326,8 @@ static unsigned int tegra194_calculate_speed(u32 cpu)
u32 rate_mhz;
/*
- * udelay() is required to reconstruct cpu frequency over an
- * observation window. Using workqueue to call udelay() with
- * interrupts enabled.
+ * Reconstruct the cpu frequency over an observation/sampling window.
+ * Use a workqueue to keep interrupts enabled during the interval.
*/
read_counters_work.c.cpu = cpu;
INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters);
@@ -357,19 +385,17 @@ static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
static unsigned int tegra194_get_speed(u32 cpu)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ u32 clusterid = data->cpu_data[cpu].clusterid;
struct cpufreq_frequency_table *pos;
- u32 cpuid, clusterid;
unsigned int rate;
u64 ndiv;
int ret;
- data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
-
/* reconstruct actual cpu freq using counters */
rate = tegra194_calculate_speed(cpu);
/* get last written ndiv value */
- ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv);
+ ret = data->soc->ops->get_cpu_ndiv(cpu, data->cpu_data[cpu].cpuid, clusterid, &ndiv);
if (WARN_ON_ONCE(ret))
return rate;
@@ -383,9 +409,9 @@ static unsigned int tegra194_get_speed(u32 cpu)
if (pos->driver_data != ndiv)
continue;
- if (abs(pos->frequency - rate) > 115200) {
- pr_warn("cpufreq: cpu%d,cur:%u,set:%u,set ndiv:%llu\n",
- cpu, rate, pos->frequency, ndiv);
+ if (abs(pos->frequency - rate) > MAX_DELTA_KHZ) {
+ pr_warn("cpufreq: cpu%d,cur:%u,set:%u,delta:%d,set ndiv:%llu\n",
+ cpu, rate, pos->frequency, abs(rate - pos->frequency), ndiv);
} else {
rate = pos->frequency;
}
@@ -450,6 +476,8 @@ static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
if (IS_ERR(opp))
continue;
+ dev_pm_opp_put(opp);
+
ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
if (ret < 0)
return ret;
@@ -473,13 +501,12 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
+ u32 clusterid = data->cpu_data[policy->cpu].clusterid;
struct cpufreq_frequency_table *freq_table;
struct cpufreq_frequency_table *bpmp_lut;
u32 start_cpu, cpu;
- u32 clusterid;
int ret;
- data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
if (clusterid >= data->soc->num_clusters || !data->bpmp_luts[clusterid])
return -EINVAL;
@@ -508,6 +535,30 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
return 0;
}
+static int tegra194_cpufreq_online(struct cpufreq_policy *policy)
+{
+ /* We did a light-weight tear-down earlier, so there is nothing to do here */
+ return 0;
+}
+
+static int tegra194_cpufreq_offline(struct cpufreq_policy *policy)
+{
+ /*
+ * Preserve policy->driver_data and don't free resources during this
+ * light-weight tear-down.
+ */
+
+ return 0;
+}
+
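+/*
+ * Full tear-down: drop the dynamically added OPPs and the DT OPP table
+ * for all CPUs covered by this policy.
+ */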
+static void tegra194_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
+
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+ dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+}
+
static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
@@ -535,7 +586,9 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
.target_index = tegra194_cpufreq_set_target,
.get = tegra194_get_speed,
.init = tegra194_cpufreq_init,
- .attr = cpufreq_generic_attr,
+ .exit = tegra194_cpufreq_exit,
+ .online = tegra194_cpufreq_online,
+ .offline = tegra194_cpufreq_offline,
};
static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
@@ -549,6 +602,7 @@ static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
.ops = &tegra194_cpufreq_ops,
.maxcpus_per_cluster = 2,
.num_clusters = 4,
+ .refclk_delta_min = 16000,
};
static void tegra194_cpufreq_free_resources(void)
@@ -628,6 +682,28 @@ tegra_cpufreq_bpmp_read_lut(struct platform_device *pdev, struct tegra_bpmp *bpm
return freq_table;
}
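+/*
+ * Cache each CPU's physical IDs and per-core frequency register address
+ * once at probe time, so the hot paths no longer have to recompute them
+ * from the MPIDR on every access.
+ */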
+static int tegra194_cpufreq_store_physids(unsigned int cpu, struct tegra194_cpufreq_data *data)
+{
+ int num_cpus = data->soc->maxcpus_per_cluster * data->soc->num_clusters;
+ u32 cpuid, clusterid;
+ u64 mpidr_id;
+
+ if (cpu > (num_cpus - 1)) {
+ pr_err("cpufreq: wrong num of cpus or clusters in soc data\n");
+ return -EINVAL;
+ }
+
+ data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
+
+ mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
+
+ data->cpu_data[cpu].cpuid = cpuid;
+ data->cpu_data[cpu].clusterid = clusterid;
+ data->cpu_data[cpu].freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
+
+ return 0;
+}
+
static int tegra194_cpufreq_probe(struct platform_device *pdev)
{
const struct tegra_cpufreq_soc *soc;
@@ -635,6 +711,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
struct tegra_bpmp *bpmp;
struct device *cpu_dev;
int err, i;
+ u32 cpu;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -642,7 +719,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
soc = of_device_get_match_data(&pdev->dev);
- if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
+ if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters && soc->refclk_delta_min) {
data->soc = soc;
} else {
dev_err(&pdev->dev, "soc data missing\n");
@@ -661,13 +738,20 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
return PTR_ERR(data->regs);
}
+ data->cpu_data = devm_kcalloc(&pdev->dev, data->soc->num_clusters *
+ data->soc->maxcpus_per_cluster,
+ sizeof(*data->cpu_data), GFP_KERNEL);
+ if (!data->cpu_data)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, data);
bpmp = tegra_bpmp_get(&pdev->dev);
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
- read_counters_wq = alloc_workqueue("read_counters_wq", __WQ_LEGACY, 1);
+ read_counters_wq = alloc_workqueue("read_counters_wq",
+ __WQ_LEGACY | WQ_PERCPU, 1);
if (!read_counters_wq) {
dev_err(&pdev->dev, "fail to create_workqueue\n");
err = -EINVAL;
@@ -682,6 +766,12 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
}
}
+ for_each_possible_cpu(cpu) {
+ err = tegra194_cpufreq_store_physids(cpu, data);
+ if (err)
+ goto err_free_res;
+ }
+
tegra194_cpufreq_driver.driver_data = data;
/* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
@@ -708,12 +798,10 @@ put_bpmp:
return err;
}
-static int tegra194_cpufreq_remove(struct platform_device *pdev)
+static void tegra194_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&tegra194_cpufreq_driver);
tegra194_cpufreq_free_resources();
-
- return 0;
}
static const struct of_device_id tegra194_cpufreq_of_match[] = {
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index d5cd2fd25cad..6ee76f5fe9c5 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -12,10 +12,11 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#define REVISION_MASK 0xF
#define REVISION_SHIFT 28
@@ -47,6 +48,37 @@
#define AM625_SUPPORT_S_MPU_OPP BIT(1)
#define AM625_SUPPORT_T_MPU_OPP BIT(2)
+enum {
+ AM62A7_EFUSE_M_MPU_OPP = 13,
+ AM62A7_EFUSE_N_MPU_OPP,
+ AM62A7_EFUSE_O_MPU_OPP,
+ AM62A7_EFUSE_P_MPU_OPP,
+ AM62A7_EFUSE_Q_MPU_OPP,
+ AM62A7_EFUSE_R_MPU_OPP,
+ AM62A7_EFUSE_S_MPU_OPP,
+ /*
+ * The V, U, and T speed grade numbering is out of order
+ * to align with the AM625 more uniformly. I promise I know
+ * my ABCs ;)
+ */
+ AM62A7_EFUSE_V_MPU_OPP,
+ AM62A7_EFUSE_U_MPU_OPP,
+ AM62A7_EFUSE_T_MPU_OPP,
+};
+
+#define AM62A7_SUPPORT_N_MPU_OPP BIT(0)
+#define AM62A7_SUPPORT_R_MPU_OPP BIT(1)
+#define AM62A7_SUPPORT_V_MPU_OPP BIT(2)
+
+#define AM62P5_EFUSE_O_MPU_OPP 15
+#define AM62P5_EFUSE_S_MPU_OPP 19
+#define AM62P5_EFUSE_T_MPU_OPP 20
+#define AM62P5_EFUSE_U_MPU_OPP 21
+#define AM62P5_EFUSE_V_MPU_OPP 22
+
+#define AM62P5_SUPPORT_O_MPU_OPP BIT(0)
+#define AM62P5_SUPPORT_U_MPU_OPP BIT(2)
+
#define VERSION_COUNT 2
struct ti_cpufreq_data;
@@ -61,6 +93,11 @@ struct ti_cpufreq_soc_data {
unsigned long efuse_shift;
unsigned long rev_offset;
bool multi_regulator;
+/* Backward compatibility hack: Might have missing syscon */
+#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1
+/* Backward compatibility hack: new syscon size is 1 register wide */
+#define TI_QUIRK_SYSCON_IS_SINGLE_REG 0x2
+ u8 quirks;
};
struct ti_cpufreq_data {
@@ -112,6 +149,51 @@ static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
return BIT(efuse);
}
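+/*
+ * Translate the raw speed-grade efuse value into a bitmask of supported
+ * MPU OPPs. The switch cases deliberately fall through, so a higher
+ * speed grade also enables every OPP set below it.
+ */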
+static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP;
+
+ switch (efuse) {
+ case AM62P5_EFUSE_V_MPU_OPP:
+ case AM62P5_EFUSE_U_MPU_OPP:
+ case AM62P5_EFUSE_T_MPU_OPP:
+ case AM62P5_EFUSE_S_MPU_OPP:
+ calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP;
+ fallthrough;
+ case AM62P5_EFUSE_O_MPU_OPP:
+ calculated_efuse |= AM62P5_SUPPORT_O_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
+
+static unsigned long am62a7_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = AM62A7_SUPPORT_N_MPU_OPP;
+
+ switch (efuse) {
+ case AM62A7_EFUSE_V_MPU_OPP:
+ case AM62A7_EFUSE_U_MPU_OPP:
+ case AM62A7_EFUSE_T_MPU_OPP:
+ case AM62A7_EFUSE_S_MPU_OPP:
+ calculated_efuse |= AM62A7_SUPPORT_V_MPU_OPP;
+ fallthrough;
+ case AM62A7_EFUSE_R_MPU_OPP:
+ case AM62A7_EFUSE_Q_MPU_OPP:
+ case AM62A7_EFUSE_P_MPU_OPP:
+ case AM62A7_EFUSE_O_MPU_OPP:
+ calculated_efuse |= AM62A7_SUPPORT_R_MPU_OPP;
+ fallthrough;
+ case AM62A7_EFUSE_N_MPU_OPP:
+ case AM62A7_EFUSE_M_MPU_OPP:
+ calculated_efuse |= AM62A7_SUPPORT_N_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
+
static unsigned long am625_efuse_xlate(struct ti_cpufreq_data *opp_data,
unsigned long efuse)
{
@@ -182,6 +264,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = {
.efuse_mask = BIT(3),
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = false,
+ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
/*
@@ -209,6 +292,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = {
.efuse_mask = BIT(9),
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = true,
+ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
/*
@@ -223,6 +307,15 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
.efuse_mask = 0,
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = false,
+ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
+};
+
+static const struct soc_device_attribute k3_cpufreq_soc[] = {
+ { .family = "AM62X", },
+ { .family = "AM62AX", },
+ { .family = "AM62PX", },
+ { .family = "AM62DX", },
+ { /* sentinel */ }
};
static struct ti_cpufreq_soc_data am625_soc_data = {
@@ -230,7 +323,23 @@ static struct ti_cpufreq_soc_data am625_soc_data = {
.efuse_offset = 0x0018,
.efuse_mask = 0x07c0,
.efuse_shift = 0x6,
- .rev_offset = 0x0014,
+ .multi_regulator = false,
+ .quirks = TI_QUIRK_SYSCON_IS_SINGLE_REG,
+};
+
+static struct ti_cpufreq_soc_data am62a7_soc_data = {
+ .efuse_xlate = am62a7_efuse_xlate,
+ .efuse_offset = 0x0,
+ .efuse_mask = 0x07c0,
+ .efuse_shift = 0x6,
+ .multi_regulator = false,
+};
+
+static struct ti_cpufreq_soc_data am62p5_soc_data = {
+ .efuse_xlate = am62p5_efuse_xlate,
+ .efuse_offset = 0x0,
+ .efuse_mask = 0x07c0,
+ .efuse_shift = 0x6,
.multi_regulator = false,
};
@@ -250,7 +359,11 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
&efuse);
- if (ret == -EIO) {
+
+ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_IS_SINGLE_REG && ret == -EIO)
+ ret = regmap_read(opp_data->syscon, 0x0, &efuse);
+
+ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
opp_data->soc_data->efuse_offset, 4);
@@ -288,10 +401,20 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
struct device *dev = opp_data->cpu_dev;
u32 revision;
int ret;
+ if (soc_device_match(k3_cpufreq_soc)) {
+ /*
+ * Since the SR is 1.0, hard-code the revision_value as 0x1 here.
+ * This avoids reusing the same register that already provides the
+ * required information via socinfo.
+ */
+ *revision_value = 0x1;
+ goto done;
+ }
ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
&revision);
- if (ret == -EIO) {
+ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
opp_data->soc_data->rev_offset, 4);
@@ -310,6 +433,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
*revision_value = BIT((revision >> REVISION_SHIFT) & REVISION_MASK);
+done:
return 0;
}
@@ -329,7 +453,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
return 0;
}
-static const struct of_device_id ti_cpufreq_of_match[] = {
+static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
{ .compatible = "ti,am33xx", .data = &am3x_soc_data, },
{ .compatible = "ti,am3517", .data = &am3517_soc_data, },
{ .compatible = "ti,am43", .data = &am4x_soc_data, },
@@ -337,7 +461,9 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
- { .compatible = "ti,am62a7", .data = &am625_soc_data, },
+ { .compatible = "ti,am62a7", .data = &am62a7_soc_data, },
+ { .compatible = "ti,am62d2", .data = &am62a7_soc_data, },
+ { .compatible = "ti,am62p5", .data = &am62p5_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
@@ -346,12 +472,10 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
static const struct of_device_id *ti_cpufreq_match_node(void)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) = of_find_node_by_path("/");
const struct of_device_id *match;
- np = of_find_node_by_path("/");
match = of_match_node(ti_cpufreq_of_match, np);
- of_node_put(np);
return match;
}
@@ -418,7 +542,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
if (ret < 0) {
- dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
+ dev_err_probe(opp_data->cpu_dev, ret, "Failed to set OPP config\n");
goto fail_put_node;
}
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index d295f405c4bb..65fea47b82e6 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -18,7 +18,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
@@ -448,7 +447,7 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
return 0;
}
-static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
+static void ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev;
@@ -456,11 +455,10 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
policy->cpu);
- return -ENODEV;
+ return;
}
put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
- return 0;
}
static struct cpufreq_driver ve_spc_cpufreq_driver = {
@@ -473,7 +471,6 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
- .attr = cpufreq_generic_attr,
};
#ifdef CONFIG_BL_SWITCHER
@@ -552,7 +549,7 @@ static int ve_spc_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int ve_spc_cpufreq_remove(struct platform_device *pdev)
+static void ve_spc_cpufreq_remove(struct platform_device *pdev)
{
bL_switcher_get_enabled();
__bLs_unregister_notifier();
@@ -560,7 +557,6 @@ static int ve_spc_cpufreq_remove(struct platform_device *pdev)
bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
ve_spc_cpufreq_driver.name);
- return 0;
}
static struct platform_driver ve_spc_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
new file mode 100644
index 000000000000..6ffa16d239b2
--- /dev/null
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#include <linux/arch_topology.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * CPU0..CPUn
+ * +-------------+-------------------------------+--------+-------+
+ * | Register | Description | Offset | Len |
+ * +-------------+-------------------------------+--------+-------+
+ * | cur_perf | read this register to get | 0x0 | 0x4 |
+ * | | the current perf (integer val | | |
+ * | | representing perf relative to | | |
+ * | | max performance) | | |
+ * | | that vCPU is running at | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | set_perf | write to this register to set | 0x4 | 0x4 |
+ * | | perf value of the vCPU | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perftbl_len | number of entries in perf | 0x8 | 0x4 |
+ * | | table. A single entry in the | | |
+ * | | perf table denotes no table | | |
+ * | | and the entry contains | | |
+ * | | the maximum perf value | | |
+ * | | that this vCPU supports. | | |
+ * | | The guest can request any | | |
+ * | | value between 1 and max perf | | |
+ * | | when perftbls are not used. | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perftbl_sel | write to this register to | 0xc | 0x4 |
+ * | | select perf table entry to | | |
+ * | | read from | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perftbl_rd | read this register to get | 0x10 | 0x4 |
+ * | | perf value of the selected | | |
+ * | | entry based on perftbl_sel | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perf_domain | performance domain number | 0x14 | 0x4 |
+ * | | that this vCPU belongs to. | | |
+ * | | vCPUs sharing the same perf | | |
+ * | | domain number are part of the | | |
+ * | | same performance domain. | | |
+ * +-------------+-------------------------------+--------+-------+
+ */
+
+#define REG_CUR_PERF_STATE_OFFSET 0x0
+#define REG_SET_PERF_STATE_OFFSET 0x4
+#define REG_PERFTBL_LEN_OFFSET 0x8
+#define REG_PERFTBL_SEL_OFFSET 0xc
+#define REG_PERFTBL_RD_OFFSET 0x10
+#define REG_PERF_DOMAIN_OFFSET 0x14
+#define PER_CPU_OFFSET 0x1000
+
+#define PERFTBL_MAX_ENTRIES 64U
+
+static void __iomem *base;
+static DEFINE_PER_CPU(u32, perftbl_num_entries);
+
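+/*
+ * Tick callback: read the current perf from the per-CPU MMIO region and
+ * publish cur_freq / max_freq (scaled by SCHED_CAPACITY_SHIFT) as the
+ * frequency scale used by the scheduler's frequency-invariance engine.
+ */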
+static void virt_scale_freq_tick(void)
+{
+ int cpu = smp_processor_id();
+ u32 max_freq = (u32)cpufreq_get_hw_max_freq(cpu);
+ u64 cur_freq;
+ unsigned long scale;
+
+ cur_freq = (u64)readl_relaxed(base + cpu * PER_CPU_OFFSET
+ + REG_CUR_PERF_STATE_OFFSET);
+
+ cur_freq <<= SCHED_CAPACITY_SHIFT;
+ scale = (unsigned long)div_u64(cur_freq, max_freq);
+ scale = min(scale, SCHED_CAPACITY_SCALE);
+
+ this_cpu_write(arch_freq_scale, scale);
+}
+
+static struct scale_freq_data virt_sfd = {
+ .source = SCALE_FREQ_SOURCE_VIRT,
+ .set_freq_scale = virt_scale_freq_tick,
+};
+
+static unsigned int virt_cpufreq_set_perf(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ writel_relaxed(target_freq,
+ base + policy->cpu * PER_CPU_OFFSET + REG_SET_PERF_STATE_OFFSET);
+ return 0;
+}
+
+static unsigned int virt_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ virt_cpufreq_set_perf(policy, target_freq);
+ return target_freq;
+}
+
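+/* Indirect LUT read: select the entry via perftbl_sel, then read perftbl_rd. */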
+static u32 virt_cpufreq_get_perftbl_entry(int cpu, u32 idx)
+{
+ writel_relaxed(idx, base + cpu * PER_CPU_OFFSET +
+ REG_PERFTBL_SEL_OFFSET);
+ return readl_relaxed(base + cpu * PER_CPU_OFFSET +
+ REG_PERFTBL_RD_OFFSET);
+}
+
+static int virt_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ int ret = 0;
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ ret = virt_cpufreq_set_perf(policy, target_freq);
+ cpufreq_freq_transition_end(policy, &freqs, ret != 0);
+
+ return ret;
+}
+
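+/* vCPUs reporting the same perf_domain value share a single policy. */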
+static int virt_cpufreq_get_sharing_cpus(struct cpufreq_policy *policy)
+{
+ u32 cur_perf_domain, perf_domain;
+ struct device *cpu_dev;
+ int cpu;
+
+ cur_perf_domain = readl_relaxed(base + policy->cpu *
+ PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);
+
+ for_each_present_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ continue;
+
+ perf_domain = readl_relaxed(base + cpu *
+ PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);
+
+ if (perf_domain == cur_perf_domain)
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+
+ return 0;
+}
+
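+/*
+ * A single perf table entry denotes "no table": it holds the maximum
+ * perf value, and any value between 1 and that maximum may be requested.
+ * Otherwise, build a cpufreq frequency table from the per-CPU LUT.
+ */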
+static int virt_cpufreq_get_freq_info(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table;
+ u32 num_perftbl_entries, idx;
+
+ num_perftbl_entries = per_cpu(perftbl_num_entries, policy->cpu);
+
+ if (num_perftbl_entries == 1) {
+ policy->cpuinfo.min_freq = 1;
+ policy->cpuinfo.max_freq = virt_cpufreq_get_perftbl_entry(policy->cpu, 0);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ policy->cur = policy->max;
+ return 0;
+ }
+
+ table = kcalloc(num_perftbl_entries + 1, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ for (idx = 0; idx < num_perftbl_entries; idx++)
+ table[idx].frequency = virt_cpufreq_get_perftbl_entry(policy->cpu, idx);
+
+ table[idx].frequency = CPUFREQ_TABLE_END;
+ policy->freq_table = table;
+
+ return 0;
+}
+
+static int virt_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev;
+ int ret;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ ret = virt_cpufreq_get_freq_info(policy);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get cpufreq info\n");
+ return ret;
+ }
+
+ ret = virt_cpufreq_get_sharing_cpus(policy);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+ return ret;
+ }
+
+ /*
+ * To simplify handling of frequency requests on the host side and to
+ * reduce their latency, this ensures that the vCPU thread triggering
+ * the MMIO abort is the same thread whose performance constraints
+ * (e.g. uclamp settings) need to be updated. This spares the VMM
+ * (Virtual Machine Manager) from having to find the correct vCPU
+ * thread and/or from facing permission issues when configuring other
+ * threads.
+ */
+ policy->dvfs_possible_from_any_cpu = false;
+ policy->fast_switch_possible = true;
+
+ /*
+ * Using the default SCALE_FREQ_SOURCE_CPUFREQ is insufficient since
+ * the actual physical CPU frequency may not match the frequency
+ * requested by the vCPU thread, due to frequency update latencies or
+ * other inputs to the physical CPU frequency selection. This
+ * additional FIE source allows for more accurate freq_scale updates
+ * and only takes effect if another FIE source, such as AMUs, has not
+ * been registered.
+ */
+ topology_set_scale_freq_source(&virt_sfd, policy->cpus);
+
+ return 0;
+}
+
+static void virt_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_VIRT, policy->related_cpus);
+ kfree(policy->freq_table);
+}
+
+static int virt_cpufreq_online(struct cpufreq_policy *policy)
+{
+ /* Nothing to restore. */
+ return 0;
+}
+
+static int virt_cpufreq_offline(struct cpufreq_policy *policy)
+{
+ /* Dummy offline() to avoid exit() being called and freeing resources. */
+ return 0;
+}
+
+static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
+{
+ if (policy->freq_table)
+ return cpufreq_frequency_table_verify(policy);
+
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+static struct cpufreq_driver cpufreq_virt_driver = {
+ .name = "virt-cpufreq",
+ .init = virt_cpufreq_cpu_init,
+ .exit = virt_cpufreq_cpu_exit,
+ .online = virt_cpufreq_online,
+ .offline = virt_cpufreq_offline,
+ .verify = virt_cpufreq_verify_policy,
+ .target = virt_cpufreq_target,
+ .fast_switch = virt_cpufreq_fast_switch,
+};
+
+static int virt_cpufreq_driver_probe(struct platform_device *pdev)
+{
+ u32 num_perftbl_entries;
+ int ret, cpu;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
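+ /* Cache each vCPU's perf table length; reject absent or oversized tables. */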
+ for_each_possible_cpu(cpu) {
+ num_perftbl_entries = readl_relaxed(base + cpu * PER_CPU_OFFSET +
+ REG_PERFTBL_LEN_OFFSET);
+
+ if (!num_perftbl_entries || num_perftbl_entries > PERFTBL_MAX_ENTRIES)
+ return -ENODEV;
+
+ per_cpu(perftbl_num_entries, cpu) = num_perftbl_entries;
+ }
+
+ ret = cpufreq_register_driver(&cpufreq_virt_driver);
+ if (ret) {
+ dev_err(&pdev->dev, "Virtual CPUFreq driver failed to register: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "Virtual CPUFreq driver initialized\n");
+ return 0;
+}
+
+static void virt_cpufreq_driver_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&cpufreq_virt_driver);
+}
+
+static const struct of_device_id virt_cpufreq_match[] = {
+ { .compatible = "qemu,virtual-cpufreq", .data = NULL},
+ {}
+};
+MODULE_DEVICE_TABLE(of, virt_cpufreq_match);
+
+static struct platform_driver virt_cpufreq_driver = {
+ .probe = virt_cpufreq_driver_probe,
+ .remove = virt_cpufreq_driver_remove,
+ .driver = {
+ .name = "virt-cpufreq",
+ .of_match_table = virt_cpufreq_match,
+ },
+};
+
+static int __init virt_cpufreq_init(void)
+{
+ return platform_driver_register(&virt_cpufreq_driver);
+}
+postcore_initcall(virt_cpufreq_init);
+
+static void __exit virt_cpufreq_exit(void)
+{
+ platform_driver_unregister(&virt_cpufreq_driver);
+}
+module_exit(virt_cpufreq_exit);
+
+MODULE_DESCRIPTION("Virtual cpufreq driver");
+MODULE_LICENSE("GPL");