author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2023-10-26 15:14:38 +0200
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2023-10-26 15:14:38 +0200
commit     78b1f56a6f9623d5ac42dc2e9860a58273e8ccae (patch)
tree       17c0127f6c617b2cd7e9938895c3fc4c77f40304
parent     c1bdc9aaf8d05e12c80f6cbd26f187f29fbe163a (diff)
parent     7c35584899ff4ba8c60a65ec7cf8af4e661911a9 (diff)
Merge branch 'pm-cpufreq'
Merge cpufreq updates for 6.7-rc1:

 - Add support for several Qualcomm SoC versions and other similar
   changes (Christian Marangi, Dmitry Baryshkov, Luca Weiss, Neil
   Armstrong, Richard Acayan, Robert Marko, Rohit Agarwal, Stephan
   Gerhold and Varadarajan Narayanan).

 - Clean up the tegra cpufreq driver (Sumit Gupta).

 - Use of_property_read_reg() to parse "reg" in the pmac32 driver
   (Rob Herring).

 - Add support for TI's am62p5 SoC (Bryan Brattlof).

 - Make ARM_BRCMSTB_AVS_CPUFREQ depend on !ARM_SCMI_CPUFREQ
   (Florian Fainelli).

 - Update Kconfig to mention i.MX7 as well (Alexander Stein).

 - Revise the global turbo disable check in intel_pstate
   (Srinivas Pandruvada).

 - Carry out the initialization of sg_cpu in the schedutil cpufreq
   governor in one loop (Liao Chang).

 - Simplify the condition for storing 'down_threshold' in the
   conservative cpufreq governor (Liao Chang).

 - Use a fine-grained mutex in the userspace cpufreq governor
   (Liao Chang).

 - Move the is_managed indicator in the userspace cpufreq governor
   into a per-policy structure (Liao Chang).

 - Rebuild sched-domains when removing the cpufreq driver
   (Pierre Gondois).

 - Fix buffer overflow detection in trans_stats() (Christian Marangi).

* pm-cpufreq: (32 commits)
  dt-bindings: cpufreq: qcom-hw: document SM8650 CPUFREQ Hardware
  cpufreq: arm: Kconfig: Add i.MX7 to supported SoC for ARM_IMX_CPUFREQ_DT
  cpufreq: qcom-nvmem: add support for IPQ8064
  cpufreq: qcom-nvmem: also accept operating-points-v2-krait-cpu
  cpufreq: qcom-nvmem: drop pvs_ver for format a fuses
  dt-bindings: cpufreq: qcom-cpufreq-nvmem: Document krait-cpu
  cpufreq: qcom-nvmem: add support for IPQ6018
  dt-bindings: cpufreq: qcom-cpufreq-nvmem: document IPQ6018
  cpufreq: qcom-nvmem: Add MSM8909
  cpufreq: qcom-nvmem: Simplify driver data allocation
  cpufreq: stats: Fix buffer overflow detection in trans_stats()
  dt-bindings: cpufreq: cpufreq-qcom-hw: Add SDX75 compatible
  cpufreq: ARM_BRCMSTB_AVS_CPUFREQ cannot be used with ARM_SCMI_CPUFREQ
  cpufreq: ti-cpufreq: Add opp support for am62p5 SoCs
  cpufreq: dt-platdev: add am62p5 to blocklist
  cpufreq: tegra194: remove redundant AND with cpu_online_mask
  cpufreq: tegra194: use refclk delta based loop instead of udelay
  cpufreq: tegra194: save CPU data to avoid repeated SMP calls
  cpufreq: Rebuild sched-domains when removing cpufreq driver
  cpufreq: userspace: Move is_managed indicator into per-policy structure
  ...
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml    |   4
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml |   8
-rw-r--r--  drivers/cpufreq/Kconfig.arm                                        |   6
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c                               |   4
-rw-r--r--  drivers/cpufreq/cpufreq.c                                          |   3
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c                             |   3
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c                                    |  14
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c                                |  76
-rw-r--r--  drivers/cpufreq/intel_pstate.c                                     |   6
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c                                   |   7
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c                               | 208
-rw-r--r--  drivers/cpufreq/tegra194-cpufreq.c                                 | 153
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c                                       |   1
-rw-r--r--  include/linux/cpufreq.h                                            |   8
-rw-r--r--  kernel/sched/cpufreq_schedutil.c                                   |  66
15 files changed, 387 insertions, 180 deletions
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
index c1d225fcf2d5..56fc71d6a081 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
@@ -23,6 +23,7 @@ properties:
- enum:
- qcom,qcm2290-cpufreq-hw
- qcom,sc7180-cpufreq-hw
+ - qcom,sdm670-cpufreq-hw
- qcom,sdm845-cpufreq-hw
- qcom,sm6115-cpufreq-hw
- qcom,sm6350-cpufreq-hw
@@ -36,11 +37,13 @@ properties:
- qcom,sa8775p-cpufreq-epss
- qcom,sc7280-cpufreq-epss
- qcom,sc8280xp-cpufreq-epss
+ - qcom,sdx75-cpufreq-epss
- qcom,sm6375-cpufreq-epss
- qcom,sm8250-cpufreq-epss
- qcom,sm8350-cpufreq-epss
- qcom,sm8450-cpufreq-epss
- qcom,sm8550-cpufreq-epss
+ - qcom,sm8650-cpufreq-epss
- const: qcom,cpufreq-epss
reg:
@@ -128,6 +131,7 @@ allOf:
- qcom,qdu1000-cpufreq-epss
- qcom,sc7180-cpufreq-hw
- qcom,sc8280xp-cpufreq-epss
+ - qcom,sdm670-cpufreq-hw
- qcom,sdm845-cpufreq-hw
- qcom,sm6115-cpufreq-hw
- qcom,sm6350-cpufreq-hw
diff --git a/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml b/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
index 7e1bb992ce90..547265b8b118 100644
--- a/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/qcom-cpufreq-nvmem.yaml
@@ -27,8 +27,12 @@ select:
enum:
- qcom,apq8064
- qcom,apq8096
+ - qcom,ipq5332
+ - qcom,ipq6018
- qcom,ipq8064
- qcom,ipq8074
+ - qcom,ipq9574
+ - qcom,msm8909
- qcom,msm8939
- qcom,msm8960
- qcom,msm8974
@@ -43,7 +47,9 @@ patternProperties:
- if:
properties:
compatible:
- const: operating-points-v2-kryo-cpu
+ enum:
+ - operating-points-v2-krait-cpu
+ - operating-points-v2-kryo-cpu
then:
$ref: /schemas/opp/opp-v2-kryo-cpu.yaml#
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 123b4bbfcfee..f911606897b8 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -90,7 +90,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
- depends on ARCH_BRCMSTB || COMPILE_TEST
+ depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
default y
help
Some Broadcom STB SoCs use a co-processor running proprietary firmware
@@ -124,8 +124,8 @@ config ARM_IMX_CPUFREQ_DT
tristate "Freescale i.MX8M cpufreq support"
depends on ARCH_MXC && CPUFREQ_DT
help
- This adds cpufreq driver support for Freescale i.MX8M series SoCs,
- based on cpufreq-dt.
+ This adds cpufreq driver support for Freescale i.MX7/i.MX8M
+ series SoCs, based on cpufreq-dt.
If in doubt, say N.
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index fb2875ce1fdd..11b3e34b7696 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -142,9 +142,11 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "nvidia,tegra234", },
{ .compatible = "qcom,apq8096", },
+ { .compatible = "qcom,msm8909", },
{ .compatible = "qcom,msm8996", },
{ .compatible = "qcom,msm8998", },
{ .compatible = "qcom,qcm2290", },
+ { .compatible = "qcom,qcm6490", },
{ .compatible = "qcom,qcs404", },
{ .compatible = "qcom,qdu1000", },
{ .compatible = "qcom,sa8155p" },
@@ -176,7 +178,9 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,omap3", },
{ .compatible = "ti,am625", },
{ .compatible = "ti,am62a7", },
+ { .compatible = "ti,am62p5", },
+ { .compatible = "qcom,ipq6018", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,apq8064", },
{ .compatible = "qcom,msm8974", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 60ed89000e82..4bc15634d49c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1544,7 +1544,7 @@ static int cpufreq_online(unsigned int cpu)
/*
* Register with the energy model before
- * sched_cpufreq_governor_change() is called, which will result
+ * sugov_eas_rebuild_sd() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model is properly initialized for the policy
* first.
@@ -2652,7 +2652,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
ret = cpufreq_start_governor(policy);
if (!ret) {
pr_debug("governor change\n");
- sched_cpufreq_governor_change(policy, old_gov);
return 0;
}
cpufreq_exit_governor(policy);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b6bd0ff35323..56500b25d77c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -187,8 +187,7 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
ret = sscanf(buf, "%u", &input);
/* cannot be lower than 1 otherwise freq will not fall */
- if (ret != 1 || input < 1 || input > 100 ||
- input >= dbs_data->up_threshold)
+ if (ret != 1 || input < 1 || input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
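
The dropped "input > 100" test is redundant because the companion up_threshold store path already caps up_threshold at 100, so "input >= dbs_data->up_threshold" also rejects any value above 100. A condensed, standalone sketch of the two validations (simplified names and storage, assuming the usual 1..100 percentage bounds; not the governor's exact code):

#include <errno.h>

static unsigned int up_threshold = 80, down_threshold = 20;

static int store_up_threshold(unsigned int input)
{
	/* keeps the invariant: down_threshold < up_threshold <= 100 */
	if (input > 100 || input <= down_threshold)
		return -EINVAL;
	up_threshold = input;
	return 0;
}

static int store_down_threshold(unsigned int input)
{
	/* input > 100 would already fail "input >= up_threshold" */
	if (input < 1 || input >= up_threshold)
		return -EINVAL;
	down_threshold = input;
	return 0;
}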
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index a33df3c66c88..40a9ff18da06 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
len += sysfs_emit_at(buf, len, " From : To\n");
len += sysfs_emit_at(buf, len, " : ");
for (i = 0; i < stats->state_num; i++) {
- if (len >= PAGE_SIZE)
+ if (len >= PAGE_SIZE - 1)
break;
len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
}
- if (len >= PAGE_SIZE)
- return PAGE_SIZE;
+ if (len >= PAGE_SIZE - 1)
+ return PAGE_SIZE - 1;
len += sysfs_emit_at(buf, len, "\n");
for (i = 0; i < stats->state_num; i++) {
- if (len >= PAGE_SIZE)
+ if (len >= PAGE_SIZE - 1)
break;
len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
for (j = 0; j < stats->state_num; j++) {
- if (len >= PAGE_SIZE)
+ if (len >= PAGE_SIZE - 1)
break;
if (pending)
@@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
len += sysfs_emit_at(buf, len, "%9u ", count);
}
- if (len >= PAGE_SIZE)
+ if (len >= PAGE_SIZE - 1)
break;
len += sysfs_emit_at(buf, len, "\n");
}
- if (len >= PAGE_SIZE) {
+ if (len >= PAGE_SIZE - 1) {
pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
return -EFBIG;
}
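
The reason for the new bound: sysfs_emit_at() keeps the output NUL-terminated within one page, so the running length can never exceed PAGE_SIZE - 1 and the old "len >= PAGE_SIZE" test could never fire, silently returning a truncated table instead of -EFBIG. A hedged sketch of the corrected pattern for a multi-row show() callback (hypothetical my_show_rows() helper, not the driver's code):

#include <linux/sysfs.h>
#include <linux/errno.h>

static ssize_t my_show_rows(char *buf, const unsigned int *rows, int nr_rows)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < nr_rows; i++) {
		/* PAGE_SIZE - 1 is the saturation point, not PAGE_SIZE */
		if (len >= PAGE_SIZE - 1)
			break;
		len += sysfs_emit_at(buf, len, "%9u\n", rows[i]);
	}

	if (len >= PAGE_SIZE - 1)
		return -EFBIG;	/* output would have been truncated */

	return len;
}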
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 50a4d7846580..2c42fee76daa 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -15,8 +15,11 @@
#include <linux/mutex.h>
#include <linux/slab.h>
-static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
-static DEFINE_MUTEX(userspace_mutex);
+struct userspace_policy {
+ unsigned int is_managed;
+ unsigned int setspeed;
+ struct mutex mutex;
+};
/**
* cpufreq_set - set the CPU frequency
@@ -28,19 +31,19 @@ static DEFINE_MUTEX(userspace_mutex);
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
- unsigned int *setspeed = policy->governor_data;
+ struct userspace_policy *userspace = policy->governor_data;
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
- mutex_lock(&userspace_mutex);
- if (!per_cpu(cpu_is_managed, policy->cpu))
+ mutex_lock(&userspace->mutex);
+ if (!userspace->is_managed)
goto err;
- *setspeed = freq;
+ userspace->setspeed = freq;
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
err:
- mutex_unlock(&userspace_mutex);
+ mutex_unlock(&userspace->mutex);
return ret;
}
@@ -51,67 +54,74 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
{
- unsigned int *setspeed;
+ struct userspace_policy *userspace;
- setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
- if (!setspeed)
+ userspace = kzalloc(sizeof(*userspace), GFP_KERNEL);
+ if (!userspace)
return -ENOMEM;
- policy->governor_data = setspeed;
+ mutex_init(&userspace->mutex);
+
+ policy->governor_data = userspace;
return 0;
}
+/*
+ * Any routine that writes to the policy struct will hold the "rwsem" of
+ * policy struct that means it is free to free "governor_data" here.
+ */
static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
{
- mutex_lock(&userspace_mutex);
kfree(policy->governor_data);
policy->governor_data = NULL;
- mutex_unlock(&userspace_mutex);
}
static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
{
- unsigned int *setspeed = policy->governor_data;
+ struct userspace_policy *userspace = policy->governor_data;
BUG_ON(!policy->cur);
pr_debug("started managing cpu %u\n", policy->cpu);
- mutex_lock(&userspace_mutex);
- per_cpu(cpu_is_managed, policy->cpu) = 1;
- *setspeed = policy->cur;
- mutex_unlock(&userspace_mutex);
+ mutex_lock(&userspace->mutex);
+ userspace->is_managed = 1;
+ userspace->setspeed = policy->cur;
+ mutex_unlock(&userspace->mutex);
return 0;
}
static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
{
- unsigned int *setspeed = policy->governor_data;
+ struct userspace_policy *userspace = policy->governor_data;
pr_debug("managing cpu %u stopped\n", policy->cpu);
- mutex_lock(&userspace_mutex);
- per_cpu(cpu_is_managed, policy->cpu) = 0;
- *setspeed = 0;
- mutex_unlock(&userspace_mutex);
+ mutex_lock(&userspace->mutex);
+ userspace->is_managed = 0;
+ userspace->setspeed = 0;
+ mutex_unlock(&userspace->mutex);
}
static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
{
- unsigned int *setspeed = policy->governor_data;
+ struct userspace_policy *userspace = policy->governor_data;
- mutex_lock(&userspace_mutex);
+ mutex_lock(&userspace->mutex);
pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
- policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
-
- if (policy->max < *setspeed)
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > *setspeed)
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ policy->cpu, policy->min, policy->max, policy->cur, userspace->setspeed);
+
+ if (policy->max < userspace->setspeed)
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_H);
+ else if (policy->min > userspace->setspeed)
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_L);
else
- __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, userspace->setspeed,
+ CPUFREQ_RELATION_L);
- mutex_unlock(&userspace_mutex);
+ mutex_unlock(&userspace->mutex);
}
static struct cpufreq_governor cpufreq_gov_userspace = {
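
Condensed, the pattern the governor moves to is: allocate state per policy in ->init(), hang it off policy->governor_data, and embed the mutex in that state, so writers on different policies no longer serialize on one global lock. A minimal sketch under those assumptions (hypothetical my_gov_* names, standard cpufreq governor callbacks):

#include <linux/cpufreq.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_gov_policy {
	unsigned int is_managed;
	unsigned int setspeed;
	struct mutex lock;	/* protects this policy's state only */
};

static int my_gov_init(struct cpufreq_policy *policy)
{
	struct my_gov_policy *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	mutex_init(&p->lock);
	policy->governor_data = p;	/* read back in later callbacks */
	return 0;
}

static int my_gov_set_speed(struct cpufreq_policy *policy, unsigned int freq)
{
	struct my_gov_policy *p = policy->governor_data;
	int ret = -EINVAL;

	mutex_lock(&p->lock);		/* other policies stay unlocked */
	if (p->is_managed) {
		p->setspeed = freq;
		ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
	}
	mutex_unlock(&p->lock);
	return ret;
}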
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dc50c9fb488d..a534a1f7f1ee 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -571,13 +571,9 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
static inline void update_turbo_state(void)
{
u64 misc_en;
- struct cpudata *cpu;
- cpu = all_cpu_data[0];
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
- global.turbo_disabled =
- (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
- cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
+ global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
}
static int min_perf_pct_min(void)
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index ec75e79659ac..df3567c1e93b 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -24,6 +24,7 @@
#include <linux/device.h>
#include <linux/hardirq.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/machdep.h>
#include <asm/irq.h>
@@ -378,10 +379,9 @@ static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
static u32 read_gpio(struct device_node *np)
{
- const u32 *reg = of_get_property(np, "reg", NULL);
- u32 offset;
+ u64 offset;
- if (reg == NULL)
+ if (of_property_read_reg(np, 0, &offset, NULL) < 0)
return 0;
/* That works for all keylargos but shall be fixed properly
* some day... The problem is that it seems we can't rely
@@ -389,7 +389,6 @@ static u32 read_gpio(struct device_node *np)
* relative to the base of KeyLargo or to the base of the
* GPIO space, and the device-tree doesn't help.
*/
- offset = *reg;
if (offset < KEYLARGO_GPIO_LEVELS0)
offset += KEYLARGO_GPIO_LEVELS0;
return offset;
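
of_property_read_reg() (from <linux/of_address.h>) parses the node's "reg" property according to the parent bus's #address-cells/#size-cells and returns the untranslated address as a u64, which is why the local variable changes type and the raw pointer dereference goes away. A hedged usage sketch (hypothetical helper, mirroring the call above):

#include <linux/of.h>
#include <linux/of_address.h>

/* Return the first "reg" address of @np, or 0 if it cannot be read. */
static u64 my_reg_base(struct device_node *np)
{
	u64 addr;

	/* index 0, size not needed (NULL), address left untranslated */
	if (of_property_read_reg(np, 0, &addr, NULL) < 0)
		return 0;

	return addr;
}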
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 84d7033e5efe..15367ac08b2b 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -30,6 +30,14 @@
#include <dt-bindings/arm/qcom,ids.h>
+enum ipq806x_versions {
+ IPQ8062_VERSION = 0,
+ IPQ8064_VERSION,
+ IPQ8065_VERSION,
+};
+
+#define IPQ6000_VERSION BIT(2)
+
struct qcom_cpufreq_drv;
struct qcom_cpufreq_match_data {
@@ -40,16 +48,38 @@ struct qcom_cpufreq_match_data {
const char **genpd_names;
};
+struct qcom_cpufreq_drv_cpu {
+ int opp_token;
+};
+
struct qcom_cpufreq_drv {
- int *opp_tokens;
u32 versions;
const struct qcom_cpufreq_match_data *data;
+ struct qcom_cpufreq_drv_cpu cpus[];
};
static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
+static int qcom_cpufreq_simple_get_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ u8 *speedbin;
+
+ *pvs_name = NULL;
+ speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ dev_dbg(cpu_dev, "speedbin: %d\n", *speedbin);
+ drv->versions = 1 << *speedbin;
+ kfree(speedbin);
+ return 0;
+}
+
static void get_krait_bin_format_a(struct device *cpu_dev,
- int *speed, int *pvs, int *pvs_ver,
+ int *speed, int *pvs,
u8 *buf)
{
u32 pte_efuse;
@@ -180,8 +210,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
switch (len) {
case 4:
- get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver,
- speedbin);
+ get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin);
break;
case 8:
get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver,
@@ -203,6 +232,114 @@ len_error:
return ret;
}
+static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ int speed = 0, pvs = 0;
+ int msm_id, ret = 0;
+ u8 *speedbin;
+ size_t len;
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ if (len != 4) {
+ dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin);
+
+ ret = qcom_smem_get_soc_id(&msm_id);
+ if (ret)
+ goto exit;
+
+ switch (msm_id) {
+ case QCOM_ID_IPQ8062:
+ drv->versions = BIT(IPQ8062_VERSION);
+ break;
+ case QCOM_ID_IPQ8064:
+ case QCOM_ID_IPQ8066:
+ case QCOM_ID_IPQ8068:
+ drv->versions = BIT(IPQ8064_VERSION);
+ break;
+ case QCOM_ID_IPQ8065:
+ case QCOM_ID_IPQ8069:
+ drv->versions = BIT(IPQ8065_VERSION);
+ break;
+ default:
+ dev_err(cpu_dev,
+ "SoC ID %u is not part of IPQ8064 family, limiting to 1.0GHz!\n",
+ msm_id);
+ drv->versions = BIT(IPQ8062_VERSION);
+ break;
+ }
+
+ /* IPQ8064 speed is never fused. Only pvs values are fused. */
+ snprintf(*pvs_name, sizeof("speed0-pvsXX"), "speed0-pvs%d", pvs);
+
+exit:
+ kfree(speedbin);
+ return ret;
+}
+
+static int qcom_cpufreq_ipq6018_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ u32 msm_id;
+ int ret;
+ u8 *speedbin;
+ *pvs_name = NULL;
+
+ ret = qcom_smem_get_soc_id(&msm_id);
+ if (ret)
+ return ret;
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ switch (msm_id) {
+ case QCOM_ID_IPQ6005:
+ case QCOM_ID_IPQ6010:
+ case QCOM_ID_IPQ6018:
+ case QCOM_ID_IPQ6028:
+ /* Fuse Value Freq BIT to set
+ * ---------------------------------
+ * 2’b0 No Limit BIT(0)
+ * 2’b1 1.5 GHz BIT(1)
+ */
+ drv->versions = 1 << (unsigned int)(*speedbin);
+ break;
+ case QCOM_ID_IPQ6000:
+ /*
+ * IPQ6018 family only has one bit to advertise the CPU
+ * speed-bin, but that is not enough for IPQ6000 which
+ * is only rated up to 1.2GHz.
+ * So for IPQ6000 manually set BIT(2) based on SMEM ID.
+ */
+ drv->versions = IPQ6000_VERSION;
+ break;
+ default:
+ dev_err(cpu_dev,
+ "SoC ID %u is not part of IPQ6018 family, limiting to 1.2GHz!\n",
+ msm_id);
+ drv->versions = IPQ6000_VERSION;
+ break;
+ }
+
+ kfree(speedbin);
+ return 0;
+}
+
+static const char *generic_genpd_names[] = { "perf", NULL };
+
static const struct qcom_cpufreq_match_data match_data_kryo = {
.get_version = qcom_cpufreq_kryo_name_version,
};
@@ -211,12 +348,25 @@ static const struct qcom_cpufreq_match_data match_data_krait = {
.get_version = qcom_cpufreq_krait_name_version,
};
+static const struct qcom_cpufreq_match_data match_data_msm8909 = {
+ .get_version = qcom_cpufreq_simple_get_version,
+ .genpd_names = generic_genpd_names,
+};
+
static const char *qcs404_genpd_names[] = { "cpr", NULL };
static const struct qcom_cpufreq_match_data match_data_qcs404 = {
.genpd_names = qcs404_genpd_names,
};
+static const struct qcom_cpufreq_match_data match_data_ipq6018 = {
+ .get_version = qcom_cpufreq_ipq6018_name_version,
+};
+
+static const struct qcom_cpufreq_match_data match_data_ipq8064 = {
+ .get_version = qcom_cpufreq_ipq8064_name_version,
+};
+
static int qcom_cpufreq_probe(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv;
@@ -237,48 +387,39 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
+ ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu") ||
+ of_device_is_compatible(np, "operating-points-v2-krait-cpu");
if (!ret) {
of_node_put(np);
return -ENOENT;
}
- drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+ drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),
+ GFP_KERNEL);
if (!drv)
return -ENOMEM;
match = pdev->dev.platform_data;
drv->data = match->data;
- if (!drv->data) {
- ret = -ENODEV;
- goto free_drv;
- }
+ if (!drv->data)
+ return -ENODEV;
if (drv->data->get_version) {
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
- if (IS_ERR(speedbin_nvmem)) {
- ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
- "Could not get nvmem cell\n");
- goto free_drv;
- }
+ if (IS_ERR(speedbin_nvmem))
+ return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+ "Could not get nvmem cell\n");
ret = drv->data->get_version(cpu_dev,
speedbin_nvmem, &pvs_name, drv);
if (ret) {
nvmem_cell_put(speedbin_nvmem);
- goto free_drv;
+ return ret;
}
nvmem_cell_put(speedbin_nvmem);
}
of_node_put(np);
- drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
- GFP_KERNEL);
- if (!drv->opp_tokens) {
- ret = -ENOMEM;
- goto free_drv;
- }
-
for_each_possible_cpu(cpu) {
struct dev_pm_opp_config config = {
.supported_hw = NULL,
@@ -304,9 +445,9 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
if (config.supported_hw || config.genpd_names) {
- drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
- if (drv->opp_tokens[cpu] < 0) {
- ret = drv->opp_tokens[cpu];
+ drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);
+ if (drv->cpus[cpu].opp_token < 0) {
+ ret = drv->cpus[cpu].opp_token;
dev_err(cpu_dev, "Failed to set OPP config\n");
goto free_opp;
}
@@ -325,11 +466,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
free_opp:
for_each_possible_cpu(cpu)
- dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
- kfree(drv->opp_tokens);
-free_drv:
- kfree(drv);
-
+ dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
return ret;
}
@@ -341,10 +478,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
- dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
-
- kfree(drv->opp_tokens);
- kfree(drv);
+ dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
static struct platform_driver qcom_cpufreq_driver = {
@@ -357,9 +491,11 @@ static struct platform_driver qcom_cpufreq_driver = {
static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
+ { .compatible = "qcom,msm8909", .data = &match_data_msm8909 },
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
- { .compatible = "qcom,ipq8064", .data = &match_data_krait },
+ { .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
+ { .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
{ .compatible = "qcom,apq8064", .data = &match_data_krait },
{ .compatible = "qcom,msm8974", .data = &match_data_krait },
{ .compatible = "qcom,msm8960", .data = &match_data_krait },
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 88ef5e57ccd0..59865ea455a8 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -5,7 +5,6 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
-#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -21,10 +20,11 @@
#define KHZ 1000
#define REF_CLK_MHZ 408 /* 408 MHz */
-#define US_DELAY 500
#define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ)
#define MAX_CNT ~0U
+#define MAX_DELTA_KHZ 115200
+
#define NDIV_MASK 0x1FF
#define CORE_OFFSET(cpu) (cpu * 8)
@@ -39,6 +39,12 @@
/* cpufreq transisition latency */
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
+struct tegra_cpu_data {
+ u32 cpuid;
+ u32 clusterid;
+ void __iomem *freq_core_reg;
+};
+
struct tegra_cpu_ctr {
u32 cpu;
u32 coreclk_cnt, last_coreclk_cnt;
@@ -62,6 +68,7 @@ struct tegra_cpufreq_soc {
int maxcpus_per_cluster;
unsigned int num_clusters;
phys_addr_t actmon_cntr_base;
+ u32 refclk_delta_min;
};
struct tegra194_cpufreq_data {
@@ -69,6 +76,7 @@ struct tegra194_cpufreq_data {
struct cpufreq_frequency_table **bpmp_luts;
const struct tegra_cpufreq_soc *soc;
bool icc_dram_bw_scaling;
+ struct tegra_cpu_data *cpu_data;
};
static struct workqueue_struct *read_counters_wq;
@@ -116,14 +124,8 @@ static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
- void __iomem *freq_core_reg;
- u64 mpidr_id;
- /* use physical id to get address of per core frequency register */
- mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
- freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
-
- *ndiv = readl(freq_core_reg) & NDIV_MASK;
+ *ndiv = readl(data->cpu_data[cpu].freq_core_reg) & NDIV_MASK;
return 0;
}
@@ -131,19 +133,10 @@ static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
- void __iomem *freq_core_reg;
- u32 cpu, cpuid, clusterid;
- u64 mpidr_id;
-
- for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) {
- data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
-
- /* use physical id to get address of per core frequency register */
- mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
- freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
+ u32 cpu;
- writel(ndiv, freq_core_reg);
- }
+ for_each_cpu(cpu, policy->cpus)
+ writel(ndiv, data->cpu_data[cpu].freq_core_reg);
}
/*
@@ -157,19 +150,35 @@ static void tegra234_read_counters(struct tegra_cpu_ctr *c)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
void __iomem *actmon_reg;
- u32 cpuid, clusterid;
+ u32 delta_refcnt;
+ int cnt = 0;
u64 val;
- data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid);
- actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid);
+ actmon_reg = CORE_ACTMON_CNTR_REG(data, data->cpu_data[c->cpu].clusterid,
+ data->cpu_data[c->cpu].cpuid);
val = readq(actmon_reg);
c->last_refclk_cnt = upper_32_bits(val);
c->last_coreclk_cnt = lower_32_bits(val);
- udelay(US_DELAY);
- val = readq(actmon_reg);
- c->refclk_cnt = upper_32_bits(val);
- c->coreclk_cnt = lower_32_bits(val);
+
+ /*
+ * The sampling window is based on the minimum number of reference
+ * clock cycles which is known to give a stable value of CPU frequency.
+ */
+ do {
+ val = readq(actmon_reg);
+ c->refclk_cnt = upper_32_bits(val);
+ c->coreclk_cnt = lower_32_bits(val);
+ if (c->refclk_cnt < c->last_refclk_cnt)
+ delta_refcnt = c->refclk_cnt + (MAX_CNT - c->last_refclk_cnt);
+ else
+ delta_refcnt = c->refclk_cnt - c->last_refclk_cnt;
+ if (++cnt >= 0xFFFF) {
+ pr_warn("cpufreq: problem with refclk on cpu:%d, delta_refcnt:%u, cnt:%d\n",
+ c->cpu, delta_refcnt, cnt);
+ break;
+ }
+ } while (delta_refcnt < data->soc->refclk_delta_min);
}
static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
@@ -184,6 +193,7 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
.actmon_cntr_base = 0x9000,
.maxcpus_per_cluster = 4,
.num_clusters = 3,
+ .refclk_delta_min = 16000,
};
static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
@@ -191,6 +201,7 @@ static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
.actmon_cntr_base = 0x4000,
.maxcpus_per_cluster = 8,
.num_clusters = 1,
+ .refclk_delta_min = 16000,
};
static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
@@ -231,15 +242,33 @@ static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response
static void tegra194_read_counters(struct tegra_cpu_ctr *c)
{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ u32 delta_refcnt;
+ int cnt = 0;
u64 val;
val = read_freq_feedback();
c->last_refclk_cnt = lower_32_bits(val);
c->last_coreclk_cnt = upper_32_bits(val);
- udelay(US_DELAY);
- val = read_freq_feedback();
- c->refclk_cnt = lower_32_bits(val);
- c->coreclk_cnt = upper_32_bits(val);
+
+ /*
+ * The sampling window is based on the minimum number of reference
+ * clock cycles which is known to give a stable value of CPU frequency.
+ */
+ do {
+ val = read_freq_feedback();
+ c->refclk_cnt = lower_32_bits(val);
+ c->coreclk_cnt = upper_32_bits(val);
+ if (c->refclk_cnt < c->last_refclk_cnt)
+ delta_refcnt = c->refclk_cnt + (MAX_CNT - c->last_refclk_cnt);
+ else
+ delta_refcnt = c->refclk_cnt - c->last_refclk_cnt;
+ if (++cnt >= 0xFFFF) {
+ pr_warn("cpufreq: problem with refclk on cpu:%d, delta_refcnt:%u, cnt:%d\n",
+ c->cpu, delta_refcnt, cnt);
+ break;
+ }
+ } while (delta_refcnt < data->soc->refclk_delta_min);
}
static void tegra_read_counters(struct work_struct *work)
@@ -297,9 +326,8 @@ static unsigned int tegra194_calculate_speed(u32 cpu)
u32 rate_mhz;
/*
- * udelay() is required to reconstruct cpu frequency over an
- * observation window. Using workqueue to call udelay() with
- * interrupts enabled.
+ * Reconstruct cpu frequency over an observation/sampling window.
+ * Using workqueue to keep interrupts enabled during the interval.
*/
read_counters_work.c.cpu = cpu;
INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters);
@@ -357,19 +385,17 @@ static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
static unsigned int tegra194_get_speed(u32 cpu)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ u32 clusterid = data->cpu_data[cpu].clusterid;
struct cpufreq_frequency_table *pos;
- u32 cpuid, clusterid;
unsigned int rate;
u64 ndiv;
int ret;
- data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
-
/* reconstruct actual cpu freq using counters */
rate = tegra194_calculate_speed(cpu);
/* get last written ndiv value */
- ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv);
+ ret = data->soc->ops->get_cpu_ndiv(cpu, data->cpu_data[cpu].cpuid, clusterid, &ndiv);
if (WARN_ON_ONCE(ret))
return rate;
@@ -383,9 +409,9 @@ static unsigned int tegra194_get_speed(u32 cpu)
if (pos->driver_data != ndiv)
continue;
- if (abs(pos->frequency - rate) > 115200) {
- pr_warn("cpufreq: cpu%d,cur:%u,set:%u,set ndiv:%llu\n",
- cpu, rate, pos->frequency, ndiv);
+ if (abs(pos->frequency - rate) > MAX_DELTA_KHZ) {
+ pr_warn("cpufreq: cpu%d,cur:%u,set:%u,delta:%d,set ndiv:%llu\n",
+ cpu, rate, pos->frequency, abs(rate - pos->frequency), ndiv);
} else {
rate = pos->frequency;
}
@@ -450,6 +476,8 @@ static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
if (IS_ERR(opp))
continue;
+ dev_pm_opp_put(opp);
+
ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
if (ret < 0)
return ret;
@@ -473,13 +501,12 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
+ u32 clusterid = data->cpu_data[policy->cpu].clusterid;
struct cpufreq_frequency_table *freq_table;
struct cpufreq_frequency_table *bpmp_lut;
u32 start_cpu, cpu;
- u32 clusterid;
int ret;
- data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
if (clusterid >= data->soc->num_clusters || !data->bpmp_luts[clusterid])
return -EINVAL;
@@ -578,6 +605,7 @@ static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
.ops = &tegra194_cpufreq_ops,
.maxcpus_per_cluster = 2,
.num_clusters = 4,
+ .refclk_delta_min = 16000,
};
static void tegra194_cpufreq_free_resources(void)
@@ -657,6 +685,28 @@ tegra_cpufreq_bpmp_read_lut(struct platform_device *pdev, struct tegra_bpmp *bpm
return freq_table;
}
+static int tegra194_cpufreq_store_physids(unsigned int cpu, struct tegra194_cpufreq_data *data)
+{
+ int num_cpus = data->soc->maxcpus_per_cluster * data->soc->num_clusters;
+ u32 cpuid, clusterid;
+ u64 mpidr_id;
+
+ if (cpu > (num_cpus - 1)) {
+ pr_err("cpufreq: wrong num of cpus or clusters in soc data\n");
+ return -EINVAL;
+ }
+
+ data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
+
+ mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
+
+ data->cpu_data[cpu].cpuid = cpuid;
+ data->cpu_data[cpu].clusterid = clusterid;
+ data->cpu_data[cpu].freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
+
+ return 0;
+}
+
static int tegra194_cpufreq_probe(struct platform_device *pdev)
{
const struct tegra_cpufreq_soc *soc;
@@ -664,6 +714,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
struct tegra_bpmp *bpmp;
struct device *cpu_dev;
int err, i;
+ u32 cpu;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -671,7 +722,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
soc = of_device_get_match_data(&pdev->dev);
- if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
+ if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters && soc->refclk_delta_min) {
data->soc = soc;
} else {
dev_err(&pdev->dev, "soc data missing\n");
@@ -690,6 +741,12 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
return PTR_ERR(data->regs);
}
+ data->cpu_data = devm_kcalloc(&pdev->dev, data->soc->num_clusters *
+ data->soc->maxcpus_per_cluster,
+ sizeof(*data->cpu_data), GFP_KERNEL);
+ if (!data->cpu_data)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, data);
bpmp = tegra_bpmp_get(&pdev->dev);
@@ -711,6 +768,12 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
}
}
+ for_each_possible_cpu(cpu) {
+ err = tegra194_cpufreq_store_physids(cpu, data);
+ if (err)
+ goto err_free_res;
+ }
+
tegra194_cpufreq_driver.driver_data = data;
/* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
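
For context on what the new sampling loop feeds: the driver derives the CPU clock from a pair of free-running counters, one ticking at the core clock and one at the 408 MHz reference, so the rate is the core-count delta scaled by REF_CLK_MHZ over the reference-count delta for the same window. Waiting for at least refclk_delta_min (16000) reference cycles, roughly 39 us, replaces the fixed udelay() with a bound the comment above says gives a stable reading. A hedged sketch of the arithmetic, using the same wrap-around handling as the loops above (hypothetical helper; u32 counters as in struct tegra_cpu_ctr):

#include <linux/math64.h>
#include <linux/types.h>

#define MY_REF_CLK_MHZ	408	/* reference counter rate */
#define MY_MAX_CNT	(~0U)	/* counters are 32 bit and wrap */

static unsigned int my_counters_to_khz(u32 last_ref, u32 ref,
				       u32 last_core, u32 core)
{
	u32 delta_ref, delta_core;

	delta_ref = (ref < last_ref) ? ref + (MY_MAX_CNT - last_ref)
				     : ref - last_ref;
	delta_core = (core < last_core) ? core + (MY_MAX_CNT - last_core)
					: core - last_core;
	if (!delta_ref)
		return 0;

	/* core cycles per reference cycle, scaled to kHz */
	return div_u64((u64)delta_core * MY_REF_CLK_MHZ * 1000, delta_ref);
}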
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 3c37d7899660..46c41e2ca727 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -338,6 +338,7 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
{ .compatible = "ti,am62a7", .data = &am625_soc_data, },
+ { .compatible = "ti,am62p5", .data = &am625_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
{ .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 71d186d6933a..1c5ca92a0555 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1193,14 +1193,6 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_
}
#endif
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov);
-#else
-static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov) { }
-#endif
-
extern unsigned int arch_freq_get_on_cpu(int cpu);
#ifndef arch_set_freq_scale
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 458d359f5991..5888176354e2 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -556,6 +556,31 @@ static const struct kobj_type sugov_tunables_ktype = {
/********************** cpufreq governor interface *********************/
+#ifdef CONFIG_ENERGY_MODEL
+static void rebuild_sd_workfn(struct work_struct *work)
+{
+ rebuild_sched_domains_energy();
+}
+
+static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
+
+/*
+ * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
+ * on governor changes to make sure the scheduler knows about it.
+ */
+static void sugov_eas_rebuild_sd(void)
+{
+ /*
+ * When called from the cpufreq_register_driver() path, the
+ * cpu_hotplug_lock is already held, so use a work item to
+ * avoid nested locking in rebuild_sched_domains().
+ */
+ schedule_work(&rebuild_sd_work);
+}
+#else
+static inline void sugov_eas_rebuild_sd(void) { };
+#endif
+
struct cpufreq_governor schedutil_gov;
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
@@ -710,6 +735,8 @@ static int sugov_init(struct cpufreq_policy *policy)
if (ret)
goto fail;
+ sugov_eas_rebuild_sd();
+
out:
mutex_unlock(&global_tunables_lock);
return 0;
@@ -751,6 +778,8 @@ static void sugov_exit(struct cpufreq_policy *policy)
sugov_kthread_stop(sg_policy);
sugov_policy_free(sg_policy);
cpufreq_disable_fast_switch(policy);
+
+ sugov_eas_rebuild_sd();
}
static int sugov_start(struct cpufreq_policy *policy)
@@ -768,14 +797,6 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
- for_each_cpu(cpu, policy->cpus) {
- struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
-
- memset(sg_cpu, 0, sizeof(*sg_cpu));
- sg_cpu->cpu = cpu;
- sg_cpu->sg_policy = sg_policy;
- }
-
if (policy_is_shared(policy))
uu = sugov_update_shared;
else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
@@ -786,6 +807,9 @@ static int sugov_start(struct cpufreq_policy *policy)
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+ memset(sg_cpu, 0, sizeof(*sg_cpu));
+ sg_cpu->cpu = cpu;
+ sg_cpu->sg_policy = sg_policy;
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
}
return 0;
@@ -839,29 +863,3 @@ struct cpufreq_governor *cpufreq_default_governor(void)
#endif
cpufreq_governor_init(schedutil_gov);
-
-#ifdef CONFIG_ENERGY_MODEL
-static void rebuild_sd_workfn(struct work_struct *work)
-{
- rebuild_sched_domains_energy();
-}
-static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
-
-/*
- * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
- * on governor changes to make sure the scheduler knows about it.
- */
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov)
-{
- if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
- /*
- * When called from the cpufreq_register_driver() path, the
- * cpu_hotplug_lock is already held, so use a work item to
- * avoid nested locking in rebuild_sched_domains().
- */
- schedule_work(&rebuild_sd_work);
- }
-
-}
-#endif