Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/cppc_acpi.c | 42
-rw-r--r--  drivers/acpi/processor_idle.c | 7
-rw-r--r--  drivers/base/cpu.c | 1
-rw-r--r--  drivers/base/power/clock_ops.c | 13
-rw-r--r--  drivers/base/power/common.c | 2
-rw-r--r--  drivers/base/power/domain.c | 13
-rw-r--r--  drivers/base/power/main.c | 11
-rw-r--r--  drivers/base/power/runtime.c | 70
-rw-r--r--  drivers/base/power/sysfs.c | 17
-rw-r--r--  drivers/base/power/wakeup.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig | 3
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 16
-rw-r--r--  drivers/cpufreq/Makefile | 1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 2
-rw-r--r--  drivers/cpufreq/armada-8k-cpufreq.c | 206
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 65
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 33
-rw-r--r--  drivers/cpufreq/cpufreq.c | 134
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 16
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/e_powersaver.c | 5
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 25
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 105
-rw-r--r--  drivers/cpufreq/longhaul.c | 2
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c | 16
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 53
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-kryo.c | 22
-rw-r--r--  drivers/cpufreq/qoriq-cpufreq.c | 15
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 67
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 53
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 17
-rw-r--r--  drivers/cpufreq/speedstep-ich.c | 3
-rw-r--r--  drivers/cpufreq/tegra124-cpufreq.c | 2
-rw-r--r--  drivers/cpuidle/Kconfig | 11
-rw-r--r--  drivers/cpuidle/dt_idle_states.c | 15
-rw-r--r--  drivers/cpuidle/governors/Makefile | 1
-rw-r--r--  drivers/cpuidle/governors/teo.c | 444
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 4
-rw-r--r--  drivers/idle/intel_idle.c | 1
-rw-r--r--  drivers/opp/core.c | 22
-rw-r--r--  drivers/opp/debugfs.c | 110
-rw-r--r--  drivers/opp/of.c | 99
-rw-r--r--  drivers/opp/opp.h | 15
-rw-r--r--  drivers/powercap/intel_rapl.c | 2
-rw-r--r--  drivers/thermal/Kconfig | 1
50 files changed, 1396 insertions, 409 deletions
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 217a782c3e55..1b207fca1420 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1051,6 +1051,48 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
/**
+ * cppc_get_desired_perf - Get the value of desired performance register.
+ * @cpunum: CPU from which to get desired performance.
+ * @desired_perf: address of a variable to store the returned desired performance
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cpc_register_resource *desired_reg;
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+
+ desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+
+ if (CPC_IN_PCC(desired_reg)) {
+ int ret = 0;
+
+ if (pcc_ss_id < 0)
+ return -EIO;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
+ cpc_read(cpunum, desired_reg, desired_perf);
+ else
+ ret = -EIO;
+
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+ }
+
+ cpc_read(cpunum, desired_reg, desired_perf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
+
+/**
* cppc_get_perf_caps - Get a CPUs performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
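A minimal usage sketch for the new helper (not part of the patch; the CPU number and printout are illustrative):

#include <acpi/cppc_acpi.h>

/* Read the desired-performance register of one CPU; 0 on success, -EIO on PCC failure. */
static int example_read_desired_perf(int cpu)
{
	u64 desired_perf;
	int ret;

	ret = cppc_get_desired_perf(cpu, &desired_perf);
	if (ret)
		return ret;

	pr_info("cpu%d desired perf: %llu\n", cpu, desired_perf);
	return 0;
}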
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b2131c4ea124..98d4ec5bf450 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -282,6 +282,13 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
pr->power.states[ACPI_STATE_C2].address,
pr->power.states[ACPI_STATE_C3].address));
+ snprintf(pr->power.states[ACPI_STATE_C2].desc,
+ ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
+ pr->power.states[ACPI_STATE_C2].address);
+ snprintf(pr->power.states[ACPI_STATE_C3].desc,
+ ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
+ pr->power.states[ACPI_STATE_C3].address);
+
return 0;
}
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index eb9443d5bae1..6ce93a52bf3f 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -427,6 +427,7 @@ __cpu_device_create(struct device *parent, void *drvdata,
dev->parent = parent;
dev->groups = groups;
dev->release = device_create_release;
+ device_set_pm_not_required(dev);
dev_set_drvdata(dev, drvdata);
retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
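A hedged sketch of the pattern this hunk enables: device_set_pm_not_required() has to be called before the device is registered, so the PM core can skip the dpm list and PM sysfs for it entirely (the function name below is illustrative):

#include <linux/device.h>
#include <linux/pm.h>

static int example_add_pm_less_device(struct device *dev)
{
	device_initialize(dev);
	device_set_pm_not_required(dev);	/* opt out of PM core handling */
	return device_add(dev);
}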
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 5a42ae4078c2..365ad751ce0f 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -65,10 +65,15 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
} else {
- clk_prepare(ce->clk);
- ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
- ce->clk, ce->con_id);
+ if (clk_prepare(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ dev_err(dev, "clk_prepare() failed\n");
+ } else {
+ ce->status = PCE_STATUS_ACQUIRED;
+ dev_dbg(dev,
+ "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
+ }
}
}
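The same checked-prepare pattern in isolation, as a hedged sketch (assumes the clock was already obtained with clk_get()):

#include <linux/clk.h>

static int example_prepare_clock(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare(clk);	/* may sleep and may fail */

	if (ret) {
		dev_err(dev, "clk_prepare() failed: %d\n", ret);
		return ret;
	}
	return 0;	/* clk_enable() may now be called */
}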
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index b413951c6abc..22aedb28aad7 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
* For a detailed function description, see dev_pm_domain_attach_by_id().
*/
struct device *dev_pm_domain_attach_by_name(struct device *dev,
- char *name)
+ const char *name)
{
if (dev->pm_domain)
return ERR_PTR(-EEXIST);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 500de1dee967..2c334c01fc43 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2483,7 +2483,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
* power-domain-names DT property. For further description see
* genpd_dev_pm_attach_by_id().
*/
-struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
int index;
@@ -2948,18 +2948,11 @@ static int __init genpd_debug_init(void)
genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
- if (!genpd_debugfs_dir)
- return -ENOMEM;
-
- d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- genpd_debugfs_dir, NULL, &summary_fops);
- if (!d)
- return -ENOMEM;
+ debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
+ NULL, &summary_fops);
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
- if (!d)
- return -ENOMEM;
debugfs_create_file("current_state", 0444,
d, genpd, &status_fops);
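The hunk follows the debugfs convention that creation failures are not fatal, so the return values are deliberately not checked; a minimal sketch of that idiom (names illustrative):

#include <linux/debugfs.h>

static u32 example_state;

static void example_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("example", NULL);

	/* Even an error pointer in dir is tolerated by the helper below. */
	debugfs_create_u32("state", 0444, dir, &example_state);
}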
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0992e67e862b..893ae464bfd6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -124,6 +124,10 @@ void device_pm_unlock(void)
*/
void device_pm_add(struct device *dev)
{
+ /* Skip PM setup/initialization. */
+ if (device_pm_not_required(dev))
+ return;
+
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
device_pm_check_callbacks(dev);
@@ -142,6 +146,9 @@ void device_pm_add(struct device *dev)
*/
void device_pm_remove(struct device *dev)
{
+ if (device_pm_not_required(dev))
+ return;
+
pr_debug("PM: Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
@@ -1741,8 +1748,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev)) {
+ pm_dev_dbg(dev, state, "direct-complete ");
goto Complete;
+ }
pm_runtime_enable(dev);
}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index ccd296dbb95c..78937c45278c 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -66,20 +66,30 @@ static int rpm_suspend(struct device *dev, int rpmflags);
*/
void update_pm_runtime_accounting(struct device *dev)
{
- unsigned long now = jiffies;
- unsigned long delta;
+ u64 now, last, delta;
- delta = now - dev->power.accounting_timestamp;
+ if (dev->power.disable_depth > 0)
+ return;
+
+ last = dev->power.accounting_timestamp;
+ now = ktime_get_mono_fast_ns();
dev->power.accounting_timestamp = now;
- if (dev->power.disable_depth > 0)
+ /*
+ * Because ktime_get_mono_fast_ns() is not monotonic during
+ * timekeeping updates, ensure that 'now' is after the last saved
+ * timestamp.
+ */
+ if (now < last)
return;
+ delta = now - last;
+
if (dev->power.runtime_status == RPM_SUSPENDED)
- dev->power.suspended_jiffies += delta;
+ dev->power.suspended_time += delta;
else
- dev->power.active_jiffies += delta;
+ dev->power.active_time += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
@@ -88,6 +98,22 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
dev->power.runtime_status = status;
}
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+ u64 time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ update_pm_runtime_accounting(dev);
+ time = dev->power.suspended_time;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return time;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
/**
* pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
* @dev: Device to handle.
@@ -129,24 +155,21 @@ static void pm_runtime_cancel_pending(struct device *dev)
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
int autosuspend_delay;
- u64 last_busy, expires = 0;
- u64 now = ktime_get_mono_fast_ns();
+ u64 expires;
if (!dev->power.use_autosuspend)
- goto out;
+ return 0;
autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
- goto out;
-
- last_busy = READ_ONCE(dev->power.last_busy);
+ return 0;
- expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
- if (expires <= now)
- expires = 0; /* Already expired. */
+ expires = READ_ONCE(dev->power.last_busy);
+ expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+ if (expires > ktime_get_mono_fast_ns())
+ return expires; /* Expires in the future */
- out:
- return expires;
+ return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
@@ -1276,6 +1299,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
pm_runtime_put_noidle(dev);
}
+ /* Update time accounting before disabling PM-runtime. */
+ update_pm_runtime_accounting(dev);
+
if (!dev->power.disable_depth++)
__pm_runtime_barrier(dev);
@@ -1294,10 +1320,15 @@ void pm_runtime_enable(struct device *dev)
spin_lock_irqsave(&dev->power.lock, flags);
- if (dev->power.disable_depth > 0)
+ if (dev->power.disable_depth > 0) {
dev->power.disable_depth--;
- else
+
+ /* About to enable runtime PM, set accounting_timestamp to now. */
+ if (!dev->power.disable_depth)
+ dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+ } else {
dev_warn(dev, "Unbalanced %s!\n", __func__);
+ }
WARN(!dev->power.disable_depth &&
dev->power.runtime_status == RPM_SUSPENDED &&
@@ -1494,7 +1525,6 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
- dev->power.accounting_timestamp = jiffies;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
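A hedged sketch of a consumer of the new ktime-based accounting; pm_runtime_suspended_time() returns nanoseconds and takes dev->power.lock internally:

#include <linux/pm_runtime.h>

static void example_report_suspended_time(struct device *dev)
{
	u64 ns = pm_runtime_suspended_time(dev);

	dev_info(dev, "suspended for %llu ns so far\n", ns);
}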
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d713738ce796..c6bf76124184 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -125,9 +125,12 @@ static ssize_t runtime_active_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
+ u64 tmp;
spin_lock_irq(&dev->power.lock);
update_pm_runtime_accounting(dev);
- ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+ tmp = dev->power.active_time;
+ do_div(tmp, NSEC_PER_MSEC);
+ ret = sprintf(buf, "%llu\n", tmp);
spin_unlock_irq(&dev->power.lock);
return ret;
}
@@ -138,10 +141,12 @@ static ssize_t runtime_suspended_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
+ u64 tmp;
spin_lock_irq(&dev->power.lock);
update_pm_runtime_accounting(dev);
- ret = sprintf(buf, "%i\n",
- jiffies_to_msecs(dev->power.suspended_jiffies));
+ tmp = dev->power.suspended_time;
+ do_div(tmp, NSEC_PER_MSEC);
+ ret = sprintf(buf, "%llu\n", tmp);
spin_unlock_irq(&dev->power.lock);
return ret;
}
@@ -648,6 +653,10 @@ int dpm_sysfs_add(struct device *dev)
{
int rc;
+ /* No need to create PM sysfs if explicitly disabled. */
+ if (device_pm_not_required(dev))
+ return 0;
+
rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
if (rc)
return rc;
@@ -727,6 +736,8 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+ if (device_pm_not_required(dev))
+ return;
sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
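The ns-to-ms conversion used by both show() routines, as a standalone sketch; do_div() divides a u64 in place and works on 32-bit architectures where a plain 64-bit division would not link:

#include <asm/div64.h>
#include <linux/time64.h>

static u64 example_ns_to_ms(u64 ns)
{
	do_div(ns, NSEC_PER_MSEC);	/* quotient is left in ns */
	return ns;
}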
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5fa1898755a3..f1fee72ed970 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -783,7 +783,7 @@ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
/**
- * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
* @dev: Device the wakeup event is related to.
* @msec: Anticipated event processing time (in milliseconds).
* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 608af20a3494..b22e6bba71f1 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -207,8 +207,6 @@ comment "CPU frequency scaling drivers"
config CPUFREQ_DT
tristate "Generic DT based cpufreq driver"
depends on HAVE_CLK && OF
- # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
- depends on !CPU_THERMAL || THERMAL
select CPUFREQ_DT_PLATDEV
select PM_OPP
help
@@ -327,7 +325,6 @@ endif
config QORIQ_CPUFREQ
tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64)
- depends on !CPU_THERMAL || THERMAL
select CLK_QORIQ
help
This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1a6778e81f90..179a1d302f48 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -25,12 +25,21 @@ config ARM_ARMADA_37XX_CPUFREQ
This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
The Armada 37xx PMU supports 4 frequency and VDD levels.
+config ARM_ARMADA_8K_CPUFREQ
+ tristate "Armada 8K CPUFreq driver"
+ depends on ARCH_MVEBU && CPUFREQ_DT
+ help
+ This enables CPUFreq driver support for Marvell
+ Armada 8K SoCs.
+ The Armada 8K devices use the AP806, which supports
+ scaling the CPU frequency to any full integer divider.
+
+ If in doubt, say N.
+
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
depends on ARM_CPU_TOPOLOGY && HAVE_CLK
- # if CPU_THERMAL is on and THERMAL=m, ARM_BIT_LITTLE_CPUFREQ cannot be =y
- depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -38,7 +47,6 @@ config ARM_BIG_LITTLE_CPUFREQ
config ARM_SCPI_CPUFREQ
tristate "SCPI based CPUfreq driver"
depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
- depends on !CPU_THERMAL || THERMAL
help
This adds the CPUfreq driver support for ARM platforms using SCPI
protocol for CPU power management.
@@ -93,7 +101,6 @@ config ARM_KIRKWOOD_CPUFREQ
config ARM_MEDIATEK_CPUFREQ
tristate "CPU Frequency scaling support for MediaTek SoCs"
depends on ARCH_MEDIATEK && REGULATOR
- depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This adds the CPUFreq driver support for MediaTek SoCs.
@@ -233,7 +240,6 @@ config ARM_SA1110_CPUFREQ
config ARM_SCMI_CPUFREQ
tristate "SCMI based CPUfreq driver"
depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
- depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
This adds the CPUfreq driver support for ARM platforms using SCMI
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 08c071be2491..689b26c6f949 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
+obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d62fd374d5c7..c72258a44ba4 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -916,8 +916,10 @@ static void __init acpi_cpufreq_boost_init(void)
{
int ret;
- if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
+ if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
+ pr_debug("Boost capabilities not present in the processor\n");
return;
+ }
acpi_cpufreq_driver.set_boost = set_boost;
acpi_cpufreq_driver.boost_enabled = boost_state(0);
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index cf62a1f64dd7..7fe52fcddcf1 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -487,6 +487,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency =
arm_bL_ops->get_transition_latency(cpu_dev);
+ dev_pm_opp_of_register_em(policy->cpus);
+
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
new file mode 100644
index 000000000000..b3f4bd647e9b
--- /dev/null
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPUFreq support for Armada 8K
+ *
+ * Copyright (C) 2018 Marvell
+ *
+ * Omri Itach <omrii@marvell.com>
+ * Gregory Clement <gregory.clement@bootlin.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+/*
+ * Setup the opps list with the divider for the max frequency, that
+ * will be filled at runtime.
+ */
+static const int opps_div[] __initconst = {1, 2, 3, 4};
+
+static struct platform_device *armada_8k_pdev;
+
+struct freq_table {
+ struct device *cpu_dev;
+ unsigned int freq[ARRAY_SIZE(opps_div)];
+};
+
+/* If the CPUs share the same clock, then they are in the same cluster. */
+static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
+ struct cpumask *cpumask)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev;
+ struct clk *clk;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_warn("Failed to get cpu%d device\n", cpu);
+ continue;
+ }
+
+ clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(clk)) {
+ pr_warn("Cannot get clock for CPU %d\n", cpu);
+ } else {
+ if (clk_is_match(clk, cur_clk))
+ cpumask_set_cpu(cpu, cpumask);
+
+ clk_put(clk);
+ }
+ }
+}
+
+static int __init armada_8k_add_opp(struct clk *clk, struct device *cpu_dev,
+ struct freq_table *freq_tables,
+ int opps_index)
+{
+ unsigned int cur_frequency;
+ unsigned int freq;
+ int i, ret;
+
+ /* Get nominal (current) CPU frequency. */
+ cur_frequency = clk_get_rate(clk);
+ if (!cur_frequency) {
+ dev_err(cpu_dev, "Failed to get clock rate for this CPU\n");
+ return -EINVAL;
+ }
+
+ freq_tables[opps_index].cpu_dev = cpu_dev;
+
+ for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
+ freq = cur_frequency / opps_div[i];
+
+ ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ if (ret)
+ return ret;
+
+ freq_tables[opps_index].freq[i] = freq;
+ }
+
+ return 0;
+}
+
+static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
+{
+ int opps_index, nb_cpus = num_possible_cpus();
+
+ for (opps_index = 0; opps_index < nb_cpus; opps_index++) {
+ int i;
+
+ /* If cpu_dev is NULL then we reached the end of the array */
+ if (!freq_tables[opps_index].cpu_dev)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
+ /*
+ * A 0 Hz frequency is not valid; it means the entry
+ * was never initialized, so there are no more OPPs
+ * to free.
+ */
+ if (freq_tables[opps_index].freq[i] == 0)
+ break;
+
+ dev_pm_opp_remove(freq_tables[opps_index].cpu_dev,
+ freq_tables[opps_index].freq[i]);
+ }
+ }
+
+ kfree(freq_tables);
+}
+
+static int __init armada_8k_cpufreq_init(void)
+{
+ int ret = 0, opps_index = 0, cpu, nb_cpus;
+ struct freq_table *freq_tables;
+ struct device_node *node;
+ struct cpumask cpus;
+
+ node = of_find_compatible_node(NULL, NULL, "marvell,ap806-cpu-clock");
+ if (!node || !of_device_is_available(node)) {
+ of_node_put(node);
+ return -ENODEV;
+ }
+
+ nb_cpus = num_possible_cpus();
+ freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
+ cpumask_copy(&cpus, cpu_possible_mask);
+
+ /*
+ * For each CPU, this loop registers the operating points
+ * supported (which are the nominal CPU frequency and full integer
+ * divisions of it).
+ */
+ for_each_cpu(cpu, &cpus) {
+ struct cpumask shared_cpus;
+ struct device *cpu_dev;
+ struct clk *clk;
+
+ cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev) {
+ pr_err("Cannot get CPU %d\n", cpu);
+ continue;
+ }
+
+ clk = clk_get(cpu_dev, NULL);
+
+ if (IS_ERR(clk)) {
+ pr_err("Cannot get clock for CPU %d\n", cpu);
+ ret = PTR_ERR(clk);
+ goto remove_opp;
+ }
+
+ ret = armada_8k_add_opp(clk, cpu_dev, freq_tables, opps_index);
+ if (ret) {
+ clk_put(clk);
+ goto remove_opp;
+ }
+
+ opps_index++;
+ cpumask_clear(&shared_cpus);
+ armada_8k_get_sharing_cpus(clk, &shared_cpus);
+ dev_pm_opp_set_sharing_cpus(cpu_dev, &shared_cpus);
+ cpumask_andnot(&cpus, &cpus, &shared_cpus);
+ clk_put(clk);
+ }
+
+ armada_8k_pdev = platform_device_register_simple("cpufreq-dt", -1,
+ NULL, 0);
+ ret = PTR_ERR_OR_ZERO(armada_8k_pdev);
+ if (ret)
+ goto remove_opp;
+
+ platform_set_drvdata(armada_8k_pdev, freq_tables);
+
+ return 0;
+
+remove_opp:
+ armada_8k_cpufreq_free_table(freq_tables);
+ return ret;
+}
+module_init(armada_8k_cpufreq_init);
+
+static void __exit armada_8k_cpufreq_exit(void)
+{
+ struct freq_table *freq_tables = platform_get_drvdata(armada_8k_pdev);
+
+ platform_device_unregister(armada_8k_pdev);
+ armada_8k_cpufreq_free_table(freq_tables);
+}
+module_exit(armada_8k_cpufreq_exit);
+
+MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>");
+MODULE_DESCRIPTION("Armada 8K cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index fd25c21cee72..2ae978d27e61 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -42,6 +42,66 @@
*/
static struct cppc_cpudata **all_cpu_data;
+struct cppc_workaround_oem_info {
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+ u32 oem_revision;
+};
+
+static bool apply_hisi_workaround;
+
+static struct cppc_workaround_oem_info wa_info[] = {
+ {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP07 ",
+ .oem_revision = 0,
+ }, {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP08 ",
+ .oem_revision = 0,
+ }
+};
+
+static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
+ unsigned int perf);
+
+/*
+ * The HiSilicon platform does not support the delivered performance
+ * counter or the reference performance counter. It calculates the
+ * performance using a platform-specific mechanism and reuses the
+ * desired performance register to store the real performance it
+ * computed.
+ */
+static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum)
+{
+ struct cppc_cpudata *cpudata = all_cpu_data[cpunum];
+ u64 desired_perf;
+ int ret;
+
+ ret = cppc_get_desired_perf(cpunum, &desired_perf);
+ if (ret < 0)
+ return -EIO;
+
+ return cppc_cpufreq_perf_to_khz(cpudata, desired_perf);
+}
+
+static void cppc_check_hisi_workaround(void)
+{
+ struct acpi_table_header *tbl;
+ acpi_status status = AE_OK;
+ int i;
+
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
+ if (ACPI_FAILURE(status) || !tbl)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+ if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+ !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+ wa_info[i].oem_revision == tbl->oem_revision)
+ apply_hisi_workaround = true;
+ }
+}
+
/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
@@ -334,6 +394,9 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
struct cppc_cpudata *cpu = all_cpu_data[cpunum];
int ret;
+ if (apply_hisi_workaround)
+ return hisi_cppc_cpufreq_get_rate(cpunum);
+
ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
if (ret)
return ret;
@@ -386,6 +449,8 @@ static int __init cppc_cpufreq_init(void)
goto out;
}
+ cppc_check_hisi_workaround();
+
ret = cpufreq_register_driver(&cppc_cpufreq_driver);
if (ret)
goto out;
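The workaround table is matched byte-for-byte against the PCCT header, so the OEM strings must be padded to the fixed ACPI field widths; a hypothetical extra entry would look like this (the IDs are illustrative, not a real platform):

/* Hypothetical entry: ACPI_OEM_ID_SIZE is 6 and ACPI_OEM_TABLE_ID_SIZE is 8. */
static struct cppc_workaround_oem_info example_wa_entry = {
	.oem_id		= "HISI  ",
	.oem_table_id	= "HIP09   ",
	.oem_revision	= 0,
};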
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index e58bfcb1169e..bde28878725b 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/cpu.h>
-#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
@@ -30,7 +29,6 @@
struct private_data {
struct opp_table *opp_table;
struct device *cpu_dev;
- struct thermal_cooling_device *cdev;
const char *reg_name;
bool have_static_opps;
};
@@ -280,6 +278,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
+ dev_pm_opp_of_register_em(policy->cpus);
+
return 0;
out_free_cpufreq_table:
@@ -297,11 +297,25 @@ out_put_clk:
return ret;
}
+static int cpufreq_online(struct cpufreq_policy *policy)
+{
+ /* We did light-weight tear down earlier, nothing to do here */
+ return 0;
+}
+
+static int cpufreq_offline(struct cpufreq_policy *policy)
+{
+ /*
+ * Preserve policy->driver_data and don't free resources on light-weight
+ * tear down.
+ */
+ return 0;
+}
+
static int cpufreq_exit(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
- cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
@@ -314,21 +328,16 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
-static void cpufreq_ready(struct cpufreq_policy *policy)
-{
- struct private_data *priv = policy->driver_data;
-
- priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver dt_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = set_target,
.get = cpufreq_generic_get,
.init = cpufreq_init,
.exit = cpufreq_exit,
- .ready = cpufreq_ready,
+ .online = cpufreq_online,
+ .offline = cpufreq_offline,
.name = "cpufreq-dt",
.attr = cpufreq_dt_attr,
.suspend = cpufreq_generic_suspend,
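A hedged sketch of the resulting driver shape: paired ->online()/->offline() callbacks give the core a light-weight path across CPU hotplug while ->exit() still does the full tear-down when the policy is freed, and CPUFREQ_IS_COOLING_DEV moves cooling-device registration into the core (callback bodies omitted; all names illustrative):

#include <linux/cpufreq.h>

static int example_init(struct cpufreq_policy *policy);
static int example_exit(struct cpufreq_policy *policy);
static int example_online(struct cpufreq_policy *policy);
static int example_offline(struct cpufreq_policy *policy);
static int example_set_target(struct cpufreq_policy *policy, unsigned int index);

static struct cpufreq_driver example_driver = {
	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
			  CPUFREQ_IS_COOLING_DEV,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_set_target,
	.init		= example_init,
	.exit		= example_exit,
	.online		= example_online,
	.offline	= example_offline,
	.name		= "example",
};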
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e35a886e00bc..0e626b00053b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -19,6 +19,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -545,13 +546,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int ret, enable;
@@ -1200,28 +1201,39 @@ static int cpufreq_online(unsigned int cpu)
return -ENOMEM;
}
- cpumask_copy(policy->cpus, cpumask_of(cpu));
+ if (!new_policy && cpufreq_driver->online) {
+ ret = cpufreq_driver->online(policy);
+ if (ret) {
+ pr_debug("%s: %d: initialization failed\n", __func__,
+ __LINE__);
+ goto out_exit_policy;
+ }
- /* call driver. From then on the cpufreq must be able
- * to accept all calls to ->verify and ->setpolicy for this CPU
- */
- ret = cpufreq_driver->init(policy);
- if (ret) {
- pr_debug("initialization failed\n");
- goto out_free_policy;
- }
+ /* Recover policy->cpus using related_cpus */
+ cpumask_copy(policy->cpus, policy->related_cpus);
+ } else {
+ cpumask_copy(policy->cpus, cpumask_of(cpu));
- ret = cpufreq_table_validate_and_sort(policy);
- if (ret)
- goto out_exit_policy;
+ /*
+ * Call driver. From then on the cpufreq must be able
+ * to accept all calls to ->verify and ->setpolicy for this CPU.
+ */
+ ret = cpufreq_driver->init(policy);
+ if (ret) {
+ pr_debug("%s: %d: initialization failed\n", __func__,
+ __LINE__);
+ goto out_free_policy;
+ }
- down_write(&policy->rwsem);
+ ret = cpufreq_table_validate_and_sort(policy);
+ if (ret)
+ goto out_exit_policy;
- if (new_policy) {
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
}
+ down_write(&policy->rwsem);
/*
* affected cpus must always be the one, which are online. We aren't
* managing offline cpus here.
@@ -1305,8 +1317,6 @@ static int cpufreq_online(unsigned int cpu)
if (ret) {
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
__func__, cpu, ret);
- /* cpufreq_policy_free() will notify based on this */
- new_policy = false;
goto out_destroy_policy;
}
@@ -1318,6 +1328,10 @@ static int cpufreq_online(unsigned int cpu)
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
+ if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
+ cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV)
+ policy->cdev = of_cpufreq_cooling_register(policy);
+
pr_debug("initialization complete\n");
return 0;
@@ -1405,6 +1419,12 @@ static int cpufreq_offline(unsigned int cpu)
goto unlock;
}
+ if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
+ cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
+
if (cpufreq_driver->stop_cpu)
cpufreq_driver->stop_cpu(policy);
@@ -1412,11 +1432,12 @@ static int cpufreq_offline(unsigned int cpu)
cpufreq_exit_governor(policy);
/*
- * Perform the ->exit() even during light-weight tear-down,
- * since this is a core component, and is essential for the
- * subsequent light-weight ->init() to succeed.
+ * Perform the ->offline() during light-weight tear-down, as
+ * that allows fast recovery when the CPU comes back.
*/
- if (cpufreq_driver->exit) {
+ if (cpufreq_driver->offline) {
+ cpufreq_driver->offline(policy);
+ } else if (cpufreq_driver->exit) {
cpufreq_driver->exit(policy);
policy->freq_table = NULL;
}
@@ -1445,8 +1466,13 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
cpumask_clear_cpu(cpu, policy->real_cpus);
remove_cpu_dev_symlink(policy, dev);
- if (cpumask_empty(policy->real_cpus))
+ if (cpumask_empty(policy->real_cpus)) {
+ /* We did light-weight exit earlier, do full tear down now */
+ if (cpufreq_driver->offline)
+ cpufreq_driver->exit(policy);
+
cpufreq_policy_free(policy);
+ }
}
/**
@@ -2192,12 +2218,25 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_get_policy);
-/*
- * policy : current policy.
- * new_policy: policy to be set.
+/**
+ * cpufreq_set_policy - Modify cpufreq policy parameters.
+ * @policy: Policy object to modify.
+ * @new_policy: New policy data.
+ *
+ * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
+ * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
+ * the driver's ->verify() callback again and run the notifiers for it again
+ * with the CPUFREQ_NOTIFY value. Next, copy the min and max parameters
+ * of @new_policy to @policy and either invoke the driver's ->setpolicy()
+ * callback (if present) or carry out a governor update for @policy. That is,
+ * run the current governor's ->limits() callback (if the governor field in
+ * @new_policy points to the same object as the one in @policy) or replace the
+ * governor for @policy with the new one stored in @new_policy.
+ *
+ * The cpuinfo part of @policy is not updated by this function.
*/
static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy)
+ struct cpufreq_policy *new_policy)
{
struct cpufreq_governor *old_gov;
int ret;
@@ -2247,11 +2286,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
if (cpufreq_driver->setpolicy) {
policy->policy = new_policy->policy;
pr_debug("setting range\n");
- return cpufreq_driver->setpolicy(new_policy);
+ return cpufreq_driver->setpolicy(policy);
}
if (new_policy->governor == policy->governor) {
- pr_debug("cpufreq: governor limits update\n");
+ pr_debug("governor limits update\n");
cpufreq_governor_limits(policy);
return 0;
}
@@ -2272,7 +2311,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
if (!ret) {
ret = cpufreq_start_governor(policy);
if (!ret) {
- pr_debug("cpufreq: governor change\n");
+ pr_debug("governor change\n");
sched_cpufreq_governor_change(policy, old_gov);
return 0;
}
@@ -2293,11 +2332,14 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
}
/**
- * cpufreq_update_policy - re-evaluate an existing cpufreq policy
- * @cpu: CPU which shall be re-evaluated
+ * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
+ * @cpu: CPU to re-evaluate the policy for.
*
- * Useful for policy notifiers which have different necessities
- * at different times.
+ * Update the current frequency for the cpufreq policy of @cpu and use
+ * cpufreq_set_policy() to re-apply the min and max limits saved in the
+ * user_policy sub-structure of that policy, which triggers the evaluation
+ * of policy notifiers and the cpufreq driver's ->verify() callback for the
+ * policy in question, among other things.
*/
void cpufreq_update_policy(unsigned int cpu)
{
@@ -2312,23 +2354,18 @@ void cpufreq_update_policy(unsigned int cpu)
if (policy_is_inactive(policy))
goto unlock;
- pr_debug("updating policy for CPU %u\n", cpu);
- memcpy(&new_policy, policy, sizeof(*policy));
- new_policy.min = policy->user_policy.min;
- new_policy.max = policy->user_policy.max;
-
/*
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
- if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
- if (cpufreq_suspended)
- goto unlock;
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy &&
+ (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy))))
+ goto unlock;
- new_policy.cur = cpufreq_update_current_freq(policy);
- if (WARN_ON(!new_policy.cur))
- goto unlock;
- }
+ pr_debug("updating policy for CPU %u\n", cpu);
+ memcpy(&new_policy, policy, sizeof(*policy));
+ new_policy.min = policy->user_policy.min;
+ new_policy.max = policy->user_policy.max;
cpufreq_set_policy(policy, &new_policy);
@@ -2479,7 +2516,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
driver_data->target) ||
(driver_data->setpolicy && (driver_data->target_index ||
driver_data->target)) ||
- (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
+ (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
+ (!driver_data->online != !driver_data->offline))
return -EINVAL;
pr_debug("trying to register driver %s\n", driver_data->name);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1572129844a5..e2db5581489a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -31,26 +31,27 @@ static void cpufreq_stats_update(struct cpufreq_stats *stats)
{
unsigned long long cur_time = get_jiffies_64();
- spin_lock(&cpufreq_stats_lock);
stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
stats->last_time = cur_time;
- spin_unlock(&cpufreq_stats_lock);
}
static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
{
unsigned int count = stats->max_state;
+ spin_lock(&cpufreq_stats_lock);
memset(stats->time_in_state, 0, count * sizeof(u64));
memset(stats->trans_table, 0, count * count * sizeof(int));
stats->last_time = get_jiffies_64();
stats->total_trans = 0;
+ spin_unlock(&cpufreq_stats_lock);
}
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
return sprintf(buf, "%d\n", policy->stats->total_trans);
}
+cpufreq_freq_attr_ro(total_trans);
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
@@ -61,7 +62,10 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
if (policy->fast_switch_enabled)
return 0;
+ spin_lock(&cpufreq_stats_lock);
cpufreq_stats_update(stats);
+ spin_unlock(&cpufreq_stats_lock);
+
for (i = 0; i < stats->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
(unsigned long long)
@@ -69,6 +73,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
}
return len;
}
+cpufreq_freq_attr_ro(time_in_state);
static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
size_t count)
@@ -77,6 +82,7 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
cpufreq_stats_clear_table(policy->stats);
return count;
}
+cpufreq_freq_attr_wo(reset);
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
@@ -126,10 +132,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
}
cpufreq_freq_attr_ro(trans_table);
-cpufreq_freq_attr_ro(total_trans);
-cpufreq_freq_attr_ro(time_in_state);
-cpufreq_freq_attr_wo(reset);
-
static struct attribute *default_attrs[] = {
&total_trans.attr,
&time_in_state.attr,
@@ -240,9 +242,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
if (old_index == -1 || new_index == -1 || old_index == new_index)
return;
+ spin_lock(&cpufreq_stats_lock);
cpufreq_stats_update(stats);
stats->last_index = new_index;
stats->trans_table[old_index * stats->max_state + new_index]++;
stats->total_trans++;
+ spin_unlock(&cpufreq_stats_lock);
}
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index d54a27c99121..940fe85db97a 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -23,13 +23,10 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/platform_data/davinci-cpufreq.h>
#include <linux/platform_device.h>
#include <linux/export.h>
-#include <mach/hardware.h>
-#include <mach/cpufreq.h>
-#include <mach/common.h>
-
struct davinci_cpufreq {
struct device *dev;
struct clk *armclk;
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 60bea302abbe..2d3ef208dd70 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -323,9 +323,8 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
states = 2;
/* Allocate private data and frequency table for current cpu */
- centaur = kzalloc(sizeof(*centaur)
- + (states + 1) * sizeof(struct cpufreq_frequency_table),
- GFP_KERNEL);
+ centaur = kzalloc(struct_size(centaur, freq_table, states + 1),
+ GFP_KERNEL);
if (!centaur)
return -ENOMEM;
eps_cpu[0] = centaur;
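struct_size() computes sizeof(*p) plus a trailing flexible array of n elements, with overflow checking; a hedged sketch of the same allocation pattern (struct and sizes illustrative):

#include <linux/cpufreq.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_data {
	int nr_states;
	struct cpufreq_frequency_table freq_table[];	/* flexible array member */
};

static struct example_data *example_alloc(int states)
{
	struct example_data *d;

	/* Same as kzalloc(sizeof(*d) + (states + 1) * sizeof(d->freq_table[0]), ...) */
	d = kzalloc(struct_size(d, freq_table, states + 1), GFP_KERNEL);
	return d;
}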
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 9fedf627e000..a4ff09f91c8f 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
@@ -52,7 +51,6 @@ static struct clk_bulk_data clks[] = {
};
static struct device *cpu_dev;
-static struct thermal_cooling_device *cdev;
static bool free_opp;
static struct cpufreq_frequency_table *freq_table;
static unsigned int max_freq;
@@ -193,16 +191,6 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
return 0;
}
-static void imx6q_cpufreq_ready(struct cpufreq_policy *policy)
-{
- cdev = of_cpufreq_cooling_register(policy);
-
- if (!cdev)
- dev_err(cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev));
-}
-
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
@@ -210,26 +198,19 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
policy->clk = clks[ARM].clk;
ret = cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = max_freq;
+ dev_pm_opp_of_register_em(policy->cpus);
return ret;
}
-static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
-{
- cpufreq_cooling_unregister(cdev);
-
- return 0;
-}
-
static struct cpufreq_driver imx6q_cpufreq_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
- .exit = imx6q_cpufreq_exit,
.name = "imx6q-cpufreq",
- .ready = imx6q_cpufreq_ready,
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dd66decf2087..002f5169d4eb 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -50,6 +50,8 @@
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
+#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
+
#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
@@ -895,7 +897,7 @@ static void intel_pstate_update_policies(void)
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
- (struct kobject *kobj, struct attribute *attr, char *buf) \
+ (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", global.object); \
}
@@ -904,7 +906,7 @@ static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);
static ssize_t show_status(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
@@ -915,7 +917,7 @@ static ssize_t show_status(struct kobject *kobj,
return ret;
}
-static ssize_t store_status(struct kobject *a, struct attribute *b,
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
char *p = memchr(buf, '\n', count);
@@ -929,7 +931,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
}
static ssize_t show_turbo_pct(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total, no_turbo, turbo_pct;
@@ -955,7 +957,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
}
static ssize_t show_num_pstates(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total;
@@ -976,7 +978,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
}
static ssize_t show_no_turbo(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
@@ -998,7 +1000,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
return ret;
}
-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1045,7 +1047,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
return count;
}
-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1075,7 +1077,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
return count;
}
-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1107,12 +1109,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
}
static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", hwp_boost);
}
-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+ struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1444,12 +1447,6 @@ static int knl_get_turbo_pstate(void)
return ret;
}
-static int intel_pstate_get_base_pstate(struct cpudata *cpu)
-{
- return global.no_turbo || global.turbo_disabled ?
- cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
-}
-
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
@@ -1470,11 +1467,9 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
- int pstate;
+ int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
update_turbo_state();
- pstate = intel_pstate_get_base_pstate(cpu);
- pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
intel_pstate_set_pstate(cpu, pstate);
}
@@ -1678,17 +1673,14 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu)
static inline int32_t get_target_pstate(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
- int32_t busy_frac, boost;
+ int32_t busy_frac;
int target, avg_pstate;
busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
sample->tsc);
- boost = cpu->iowait_boost;
- cpu->iowait_boost >>= 1;
-
- if (busy_frac < boost)
- busy_frac = boost;
+ if (busy_frac < cpu->iowait_boost)
+ busy_frac = cpu->iowait_boost;
sample->busy_scaled = busy_frac * 100;
@@ -1715,11 +1707,9 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
- int max_pstate = intel_pstate_get_base_pstate(cpu);
- int min_pstate;
+ int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
+ int max_pstate = max(min_pstate, cpu->max_perf_ratio);
- min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
- max_pstate = max(min_pstate, cpu->max_perf_ratio);
return clamp_t(int, pstate, min_pstate, max_pstate);
}
@@ -1767,29 +1757,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
if (smp_processor_id() != cpu->cpu)
return;
+ delta_ns = time - cpu->last_update;
if (flags & SCHED_CPUFREQ_IOWAIT) {
- cpu->iowait_boost = int_tofp(1);
- cpu->last_update = time;
- /*
- * The last time the busy was 100% so P-state was max anyway
- * so avoid overhead of computation.
- */
- if (fp_toint(cpu->sample.busy_scaled) == 100)
- return;
-
- goto set_pstate;
+ /* Start over if the CPU may have been idle. */
+ if (delta_ns > TICK_NSEC) {
+ cpu->iowait_boost = ONE_EIGHTH_FP;
+ } else if (cpu->iowait_boost) {
+ cpu->iowait_boost <<= 1;
+ if (cpu->iowait_boost > int_tofp(1))
+ cpu->iowait_boost = int_tofp(1);
+ } else {
+ cpu->iowait_boost = ONE_EIGHTH_FP;
+ }
} else if (cpu->iowait_boost) {
/* Clear iowait_boost if the CPU may have been idle. */
- delta_ns = time - cpu->last_update;
if (delta_ns > TICK_NSEC)
cpu->iowait_boost = 0;
+ else
+ cpu->iowait_boost >>= 1;
}
cpu->last_update = time;
delta_ns = time - cpu->sample.time;
if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
return;
-set_pstate:
if (intel_pstate_sample(cpu, time))
intel_pstate_adjust_pstate(cpu);
}
@@ -1976,7 +1967,8 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
if (hwp_active) {
intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
} else {
- max_state = intel_pstate_get_base_pstate(cpu);
+ max_state = global.no_turbo || global.turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
turbo_max = cpu->pstate.turbo_pstate;
}
@@ -2475,6 +2467,7 @@ static bool __init intel_pstate_no_acpi_pss(void)
kfree(pss);
}
+ pr_debug("ACPI _PSS not found\n");
return true;
}
@@ -2485,9 +2478,14 @@ static bool __init intel_pstate_no_acpi_pcch(void)
status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status))
- return true;
+ goto not_found;
+
+ if (acpi_has_method(handle, "PCCH"))
+ return false;
- return !acpi_has_method(handle, "PCCH");
+not_found:
+ pr_debug("ACPI PCCH not found\n");
+ return true;
}
static bool __init intel_pstate_has_acpi_ppc(void)
@@ -2502,6 +2500,7 @@ static bool __init intel_pstate_has_acpi_ppc(void)
if (acpi_has_method(pr->handle, "_PPC"))
return true;
}
+ pr_debug("ACPI _PPC not found\n");
return false;
}
@@ -2539,8 +2538,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
- if ( misc_pwr & (1 << 8))
+ if (misc_pwr & (1 << 8)) {
+ pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
return true;
+ }
}
idx = acpi_match_platform_list(plat_info);
@@ -2606,22 +2607,28 @@ static int __init intel_pstate_init(void)
}
} else {
id = x86_match_cpu(intel_pstate_cpu_ids);
- if (!id)
+ if (!id) {
+ pr_info("CPU ID not supported\n");
return -ENODEV;
+ }
copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
}
- if (intel_pstate_msrs_not_valid())
+ if (intel_pstate_msrs_not_valid()) {
+ pr_info("Invalid MSRs\n");
return -ENODEV;
+ }
hwp_cpu_matched:
/*
* The Intel pstate driver will be ignored if the platform
* firmware has its own power management modes.
*/
- if (intel_pstate_platform_pwr_mgmt_exists())
+ if (intel_pstate_platform_pwr_mgmt_exists()) {
+ pr_info("P-states controlled by the platform\n");
return -ENODEV;
+ }
if (!hwp_active && hwp_only)
return -ENOTSUPP;
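A hedged model of the new iowait boost behaviour: the boost starts at one eighth in FRAC_BITS fixed point, doubles on each back-to-back iowait wakeup, saturates at 1.0 and halves once iowait stops (the TICK_NSEC idle reset is omitted here; the constants mirror the driver, the helper is illustrative):

#include <linux/types.h>

#define EXAMPLE_FRAC_BITS	8
#define EXAMPLE_ONE_FP		((s64)1 << EXAMPLE_FRAC_BITS)
#define EXAMPLE_ONE_EIGHTH_FP	((s64)1 << (EXAMPLE_FRAC_BITS - 3))

static s64 example_iowait_boost_step(s64 boost, bool iowait_wakeup)
{
	if (iowait_wakeup) {
		boost = boost ? boost << 1 : EXAMPLE_ONE_EIGHTH_FP;
		if (boost > EXAMPLE_ONE_FP)
			boost = EXAMPLE_ONE_FP;
	} else if (boost) {
		boost >>= 1;	/* decay while no iowait is pending */
	}
	return boost;
}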
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 279bd9e9fa95..fb546e0d0356 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -851,7 +851,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
case TYPE_POWERSAVER:
pr_cont("Powersaver supported\n");
break;
- };
+ }
/* Doesn't hurt */
longhaul_setup_southbridge();
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index eb8920d39818..48e9829274c6 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -14,7 +14,6 @@
#include <linux/clk.h>
#include <linux/cpu.h>
-#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/module.h>
@@ -48,7 +47,6 @@ struct mtk_cpu_dvfs_info {
struct regulator *sram_reg;
struct clk *cpu_clk;
struct clk *inter_clk;
- struct thermal_cooling_device *cdev;
struct list_head list_head;
int intermediate_voltage;
bool need_voltage_tracking;
@@ -307,13 +305,6 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
#define DYNAMIC_POWER "dynamic-power-coefficient"
-static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
-{
- struct mtk_cpu_dvfs_info *info = policy->driver_data;
-
- info->cdev = of_cpufreq_cooling_register(policy);
-}
-
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
{
struct device *cpu_dev;
@@ -465,6 +456,8 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = info;
policy->clk = info->cpu_clk;
+ dev_pm_opp_of_register_em(policy->cpus);
+
return 0;
}
@@ -472,7 +465,6 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
- cpufreq_cooling_unregister(info->cdev);
dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
return 0;
@@ -480,13 +472,13 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver mtk_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = mtk_cpufreq_set_target,
.get = cpufreq_generic_get,
.init = mtk_cpufreq_init,
.exit = mtk_cpufreq_exit,
- .ready = mtk_cpufreq_ready,
.name = "mtk-cpufreq",
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 71e81bbf031b..68052b74d28f 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -133,8 +133,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
/* FIXME: what's the actual transition time? */
result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
- if (!result)
+ if (!result) {
+ dev_pm_opp_of_register_em(policy->cpus);
return 0;
+ }
freq_table_free();
fail:
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 099a849396f6..1e5e64643c3a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -268,7 +268,7 @@ static int pcc_get_offset(int cpu)
if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
ret = -ENODEV;
goto out_free;
- };
+ }
offset = &(pccp->package.elements[0]);
if (!offset || offset->type != ACPI_TYPE_INTEGER) {
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 7e7ad3879c4e..d2230812fa4b 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -244,6 +244,7 @@ static int init_powernv_pstates(void)
u32 len_ids, len_freqs;
u32 pstate_min, pstate_max, pstate_nominal;
u32 pstate_turbo, pstate_ultra_turbo;
+ int rc = -ENODEV;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
@@ -327,8 +328,11 @@ next:
powernv_freqs[i].frequency = freq * 1000; /* kHz */
powernv_freqs[i].driver_data = id & 0xFF;
- revmap_data = (struct pstate_idx_revmap_data *)
- kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+ revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+ if (!revmap_data) {
+ rc = -ENOMEM;
+ goto out;
+ }
revmap_data->pstate_id = id & 0xFF;
revmap_data->cpufreq_table_idx = i;
@@ -357,7 +361,7 @@ next:
return 0;
out:
of_node_put(power_mgt);
- return -ENODEV;
+ return rc;
}
/* Returns the CPU frequency corresponding to the pstate_id. */
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index d83939a1b3d4..4b0b50403901 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -10,18 +10,21 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
#define LUT_L_VAL GENMASK(7, 0)
#define LUT_CORE_COUNT GENMASK(18, 16)
+#define LUT_VOLT GENMASK(11, 0)
#define LUT_ROW_SIZE 32
#define CLK_HW_DIV 2
/* Register offsets */
#define REG_ENABLE 0x0
-#define REG_LUT_TABLE 0x110
+#define REG_FREQ_LUT 0x110
+#define REG_VOLT_LUT 0x114
#define REG_PERF_STATE 0x920
static unsigned long cpu_hw_rate, xo_rate;
@@ -70,11 +73,12 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
return policy->freq_table[index].frequency;
}
-static int qcom_cpufreq_hw_read_lut(struct device *dev,
+static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
struct cpufreq_policy *policy,
void __iomem *base)
{
u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq;
+ u32 volt;
unsigned int max_cores = cpumask_weight(policy->cpus);
struct cpufreq_frequency_table *table;
@@ -83,23 +87,28 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
return -ENOMEM;
for (i = 0; i < LUT_MAX_ENTRIES; i++) {
- data = readl_relaxed(base + REG_LUT_TABLE + i * LUT_ROW_SIZE);
+ data = readl_relaxed(base + REG_FREQ_LUT +
+ i * LUT_ROW_SIZE);
src = FIELD_GET(LUT_SRC, data);
lval = FIELD_GET(LUT_L_VAL, data);
core_count = FIELD_GET(LUT_CORE_COUNT, data);
+ data = readl_relaxed(base + REG_VOLT_LUT +
+ i * LUT_ROW_SIZE);
+ volt = FIELD_GET(LUT_VOLT, data) * 1000;
+
if (src)
freq = xo_rate * lval / 1000;
else
freq = cpu_hw_rate / 1000;
- /* Ignore boosts in the middle of the table */
- if (core_count != max_cores) {
- table[i].frequency = CPUFREQ_ENTRY_INVALID;
- } else {
+ if (freq != prev_freq && core_count == max_cores) {
table[i].frequency = freq;
- dev_dbg(dev, "index=%d freq=%d, core_count %d\n", i,
+ dev_pm_opp_add(cpu_dev, freq * 1000, volt);
+ dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
freq, core_count);
+ } else {
+ table[i].frequency = CPUFREQ_ENTRY_INVALID;
}
/*
@@ -116,6 +125,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
if (prev_cc != max_cores) {
prev->frequency = prev_freq;
prev->flags = CPUFREQ_BOOST_FREQ;
+ dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt);
}
break;
@@ -127,6 +137,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
table[i].frequency = CPUFREQ_TABLE_END;
policy->freq_table = table;
+ dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
return 0;
}
@@ -159,10 +170,18 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
struct device *dev = &global_pdev->dev;
struct of_phandle_args args;
struct device_node *cpu_np;
+ struct device *cpu_dev;
struct resource *res;
void __iomem *base;
int ret, index;
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
cpu_np = of_cpu_device_node_get(policy->cpu);
if (!cpu_np)
return -EINVAL;
@@ -199,12 +218,21 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = base + REG_PERF_STATE;
- ret = qcom_cpufreq_hw_read_lut(dev, policy, base);
+ ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy, base);
if (ret) {
dev_err(dev, "Domain-%d failed to read LUT\n", index);
goto error;
}
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_err(cpu_dev, "Failed to add OPPs\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dev_pm_opp_of_register_em(policy->cpus);
+
policy->fast_switch_possible = true;
return 0;
@@ -215,8 +243,10 @@ error:
static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
void __iomem *base = policy->driver_data - REG_PERF_STATE;
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
kfree(policy->freq_table);
devm_iounmap(&global_pdev->dev, base);
@@ -231,7 +261,8 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = {
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qcom_cpufreq_hw_target_index,
.get = qcom_cpufreq_hw_get,
@@ -296,7 +327,7 @@ static int __init qcom_cpufreq_hw_init(void)
{
return platform_driver_register(&qcom_cpufreq_hw_driver);
}
-subsys_initcall(qcom_cpufreq_hw_init);
+device_initcall(qcom_cpufreq_hw_init);
static void __exit qcom_cpufreq_hw_exit(void)
{
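
For orientation, a worked decode of one hypothetical frequency-LUT word under the masks defined above, assuming the customary 19.2 MHz XO crystal (the register value is made up):

	u32 data = 0x40030010;				/* hypothetical LUT entry */
	u32 src  = FIELD_GET(LUT_SRC, data);		/* bits 31:30 -> 1 (PLL) */
	u32 cc   = FIELD_GET(LUT_CORE_COUNT, data);	/* bits 18:16 -> 3 */
	u32 lval = FIELD_GET(LUT_L_VAL, data);		/* bits 7:0   -> 16 */
	unsigned long freq = 19200000UL * lval / 1000;	/* -> 307200 kHz */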
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index 2a3675c24032..dd64dcf89c74 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -42,7 +42,7 @@ enum _msm8996_version {
NUM_OF_MSM8996_VERSIONS,
};
-struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
{
@@ -75,7 +75,7 @@ static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
{
- struct opp_table *opp_tables[NR_CPUS] = {0};
+ struct opp_table **opp_tables;
enum _msm8996_version msm8996_version;
struct nvmem_cell *speedbin_nvmem;
struct device_node *np;
@@ -133,6 +133,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
}
kfree(speedbin);
+ opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL);
+ if (!opp_tables)
+ return -ENOMEM;
+
for_each_possible_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
if (NULL == cpu_dev) {
@@ -151,8 +155,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
NULL, 0);
- if (!IS_ERR(cpufreq_dt_pdev))
+ if (!IS_ERR(cpufreq_dt_pdev)) {
+ platform_set_drvdata(pdev, opp_tables);
return 0;
+ }
ret = PTR_ERR(cpufreq_dt_pdev);
dev_err(cpu_dev, "Failed to register platform device\n");
@@ -163,13 +169,23 @@ free_opp:
break;
dev_pm_opp_put_supported_hw(opp_tables[cpu]);
}
+ kfree(opp_tables);
return ret;
}
static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
{
+ struct opp_table **opp_tables = platform_get_drvdata(pdev);
+ unsigned int cpu;
+
platform_device_unregister(cpufreq_dt_pdev);
+
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_put_supported_hw(opp_tables[cpu]);
+
+ kfree(opp_tables);
+
return 0;
}
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 3d773f64b4df..4295e5476264 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -31,7 +30,6 @@
struct cpu_data {
struct clk **pclk;
struct cpufreq_frequency_table *table;
- struct thermal_cooling_device *cdev;
};
/*
@@ -239,7 +237,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cpu_data *data = policy->driver_data;
- cpufreq_cooling_unregister(data->cdev);
kfree(data->pclk);
kfree(data->table);
kfree(data);
@@ -258,23 +255,15 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
return clk_set_parent(policy->clk, parent);
}
-
-static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
-{
- struct cpu_data *cpud = policy->driver_data;
-
- cpud->cdev = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver qoriq_cpufreq_driver = {
.name = "qoriq_cpufreq",
- .flags = CPUFREQ_CONST_LOOPS,
+ .flags = CPUFREQ_CONST_LOOPS |
+ CPUFREQ_IS_COOLING_DEV,
.init = qoriq_cpufreq_cpu_init,
.exit = qoriq_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
- .ready = qoriq_cpufreq_ready,
.attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index dbecd7667db2..5b4289460bc9 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -584,7 +584,7 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
static int s5pv210_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- int id;
+ int id, result = 0;
/*
* HACK: This is a temporary workaround to get access to clock
@@ -594,18 +594,39 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
* this whole driver as soon as S5PV210 gets migrated to use
* cpufreq-dt driver.
*/
+ arm_regulator = regulator_get(NULL, "vddarm");
+ if (IS_ERR(arm_regulator)) {
+ if (PTR_ERR(arm_regulator) == -EPROBE_DEFER)
+ pr_debug("vddarm regulator not ready, defer\n");
+ else
+ pr_err("failed to get regulator vddarm\n");
+ return PTR_ERR(arm_regulator);
+ }
+
+ int_regulator = regulator_get(NULL, "vddint");
+ if (IS_ERR(int_regulator)) {
+ if (PTR_ERR(int_regulator) == -EPROBE_DEFER)
+ pr_debug("vddint regulator not ready, defer\n");
+ else
+ pr_err("failed to get regulator vddint\n");
+ result = PTR_ERR(int_regulator);
+ goto err_int_regulator;
+ }
+
np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
if (!np) {
pr_err("%s: failed to find clock controller DT node\n",
__func__);
- return -ENODEV;
+ result = -ENODEV;
+ goto err_clock;
}
clk_base = of_iomap(np, 0);
of_node_put(np);
if (!clk_base) {
pr_err("%s: failed to map clock registers\n", __func__);
- return -EFAULT;
+ result = -EFAULT;
+ goto err_clock;
}
for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
@@ -614,7 +635,8 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
pr_err("%s: failed to get alias of dmc node '%pOFn'\n",
__func__, np);
of_node_put(np);
- return id;
+ result = id;
+ goto err_clk_base;
}
dmc_base[id] = of_iomap(np, 0);
@@ -622,33 +644,40 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
pr_err("%s: failed to map dmc%d registers\n",
__func__, id);
of_node_put(np);
- return -EFAULT;
+ result = -EFAULT;
+ goto err_dmc;
}
}
for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
if (!dmc_base[id]) {
pr_err("%s: failed to find dmc%d node\n", __func__, id);
- return -ENODEV;
+ result = -ENODEV;
+ goto err_dmc;
}
}
- arm_regulator = regulator_get(NULL, "vddarm");
- if (IS_ERR(arm_regulator)) {
- pr_err("failed to get regulator vddarm\n");
- return PTR_ERR(arm_regulator);
- }
-
- int_regulator = regulator_get(NULL, "vddint");
- if (IS_ERR(int_regulator)) {
- pr_err("failed to get regulator vddint\n");
- regulator_put(arm_regulator);
- return PTR_ERR(int_regulator);
- }
-
register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
return cpufreq_register_driver(&s5pv210_driver);
+
+err_dmc:
+ for (id = 0; id < ARRAY_SIZE(dmc_base); ++id)
+ if (dmc_base[id]) {
+ iounmap(dmc_base[id]);
+ dmc_base[id] = NULL;
+ }
+
+err_clk_base:
+ iounmap(clk_base);
+
+err_clock:
+ regulator_put(int_regulator);
+
+err_int_regulator:
+ regulator_put(arm_regulator);
+
+ return result;
}
static struct platform_driver s5pv210_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 9ed46d188cb5..e6182c89df79 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -11,7 +11,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
+#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
@@ -22,7 +22,6 @@
struct scmi_data {
int domain_id;
struct device *cpu_dev;
- struct thermal_cooling_device *cdev;
};
static const struct scmi_handle *handle;
@@ -103,13 +102,42 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
return 0;
}
+static int __maybe_unused
+scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+ unsigned long Hz;
+ int ret, domain;
+
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", cpu);
+ return -ENODEV;
+ }
+
+ domain = handle->perf_ops->device_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
+ /* Get the power cost of the performance domain. */
+ Hz = *KHz * 1000;
+ ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power);
+ if (ret)
+ return ret;
+
+ /* The EM framework specifies the frequency in KHz. */
+ *KHz = Hz / 1000;
+
+ return 0;
+}
+
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
- int ret;
+ int ret, nr_opp;
unsigned int latency;
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
+ struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -136,8 +164,8 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
- ret = dev_pm_opp_get_opp_count(cpu_dev);
- if (ret <= 0) {
+ nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+ if (nr_opp <= 0) {
dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
ret = -EPROBE_DEFER;
goto out_free_opp;
@@ -171,6 +199,9 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = true;
+
+ em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+
return 0;
out_free_priv:
@@ -185,7 +216,6 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scmi_data *priv = policy->driver_data;
- cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
kfree(priv);
@@ -193,17 +223,11 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
-static void scmi_cpufreq_ready(struct cpufreq_policy *policy)
-{
- struct scmi_data *priv = policy->driver_data;
-
- priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
.flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.attr = cpufreq_generic_attr,
.target_index = scmi_cpufreq_set_target,
@@ -211,7 +235,6 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
- .ready = scmi_cpufreq_ready,
};
static int scmi_cpufreq_probe(struct scmi_device *sdev)
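
The EM_DATA_CB()/em_register_perf_domain() pairing above relies on the energy model core invoking the callback once per OPP with a frequency to be rounded up; the callback fills in the matching power cost. A toy callback under that assumption (table values invented):

static int toy_get_power(unsigned long *mW, unsigned long *kHz, int cpu)
{
	static const unsigned long freq[] = { 500000, 1000000, 1500000 };
	static const unsigned long cost[] = { 100, 300, 700 };	/* mW */
	int i;

	for (i = 0; i < ARRAY_SIZE(freq); i++) {
		if (*kHz <= freq[i]) {
			*kHz = freq[i];		/* ceil to a supported OPP */
			*mW = cost[i];
			return 0;
		}
	}

	return -EINVAL;			/* above the fastest OPP */
}

/* registered as in scmi_cpufreq_init() above:
 *	struct em_data_callback em_cb = EM_DATA_CB(toy_get_power);
 *	em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
 */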
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 99449738faa4..3f49427766b8 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -22,7 +22,6 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -34,7 +33,6 @@
struct scpi_data {
struct clk *clk;
struct device *cpu_dev;
- struct thermal_cooling_device *cdev;
};
static struct scpi_ops *scpi_ops;
@@ -170,6 +168,9 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = false;
+
+ dev_pm_opp_of_register_em(policy->cpus);
+
return 0;
out_free_cpufreq_table:
@@ -186,7 +187,6 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scpi_data *priv = policy->driver_data;
- cpufreq_cooling_unregister(priv->cdev);
clk_put(priv->clk);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
kfree(priv);
@@ -195,23 +195,16 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
-static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
-{
- struct scpi_data *priv = policy->driver_data;
-
- priv->cdev = of_cpufreq_cooling_register(policy);
-}
-
static struct cpufreq_driver scpi_cpufreq_driver = {
.name = "scpi-cpufreq",
.flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.attr = cpufreq_generic_attr,
.get = scpi_cpufreq_get_rate,
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
- .ready = scpi_cpufreq_ready,
.target_index = scpi_cpufreq_set_target,
};
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index fbbcb88db061..5d8a09b82efb 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -243,8 +243,7 @@ static unsigned int speedstep_get(unsigned int cpu)
unsigned int speed;
/* You're supposed to ensure CPU is online. */
- if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
- BUG();
+ BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1));
pr_debug("detected %u kHz as current frequency\n", speed);
return speed;
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index ba3795e13ac6..5e748c8a5c9a 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -118,6 +118,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ of_node_put(np);
+
return 0;
out_put_pllp_clk:
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 7e48eb5bf0a7..8caccbbd7353 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -4,7 +4,7 @@ config CPU_IDLE
bool "CPU idle PM support"
default y if ACPI || PPC_PSERIES
select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE)
- select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE)
+ select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE) && !CPU_IDLE_GOV_TEO
help
CPU idle is a generic framework for supporting software-controlled
idle processor power management. It includes modular cross-platform
@@ -23,6 +23,15 @@ config CPU_IDLE_GOV_LADDER
config CPU_IDLE_GOV_MENU
bool "Menu governor (for tickless system)"
+config CPU_IDLE_GOV_TEO
+ bool "Timer events oriented (TEO) governor (for tickless systems)"
+ help
+ This governor implements a simplified idle state selection method
+ focused on timer events and does not do any interactivity boosting.
+
+ Some workloads benefit from using it and it generally should be safe
+ to use. Say Y here if you are not happy with the alternatives.
+
config DT_IDLE_STATES
bool
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 53342b7f1010..add9569636b5 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -22,16 +22,12 @@
#include "dt_idle_states.h"
static int init_state_node(struct cpuidle_state *idle_state,
- const struct of_device_id *matches,
+ const struct of_device_id *match_id,
struct device_node *state_node)
{
int err;
- const struct of_device_id *match_id;
const char *desc;
- match_id = of_match_node(matches, state_node);
- if (!match_id)
- return -ENODEV;
/*
* CPUidle drivers are expected to initialize the const void *data
* pointer of the passed in struct of_device_id array to the idle
@@ -160,6 +156,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
{
struct cpuidle_state *idle_state;
struct device_node *state_node, *cpu_node;
+ const struct of_device_id *match_id;
int i, err = 0;
const cpumask_t *cpumask;
unsigned int state_idx = start_idx;
@@ -180,6 +177,12 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
if (!state_node)
break;
+ match_id = of_match_node(matches, state_node);
+ if (!match_id) {
+ err = -ENODEV;
+ break;
+ }
+
if (!of_device_is_available(state_node)) {
of_node_put(state_node);
continue;
@@ -198,7 +201,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
}
idle_state = &drv->states[state_idx++];
- err = init_state_node(idle_state, matches, state_node);
+ err = init_state_node(idle_state, match_id, state_node);
if (err) {
pr_err("Parsing idle state node %pOF failed with err %d\n",
state_node, err);
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile
index 1b512722689f..4d8aff5248a8 100644
--- a/drivers/cpuidle/governors/Makefile
+++ b/drivers/cpuidle/governors/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
+obj-$(CONFIG_CPU_IDLE_GOV_TEO) += teo.o
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
new file mode 100644
index 000000000000..7d05efdbd3c6
--- /dev/null
+++ b/drivers/cpuidle/governors/teo.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Timer events oriented CPU idle governor
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * The idea of this governor is based on the observation that on many systems
+ * timer events are two or more orders of magnitude more frequent than any
+ * other interrupts, so they are likely to be the most significant source of CPU
+ * wakeups from idle states. Moreover, information about what happened in the
+ * (relatively recent) past can be used to estimate whether or not the deepest
+ * idle state with target residency within the time till the closest timer is
+ * likely to be suitable for the upcoming idle time of the CPU and, if not, then
+ * which of the shallower idle states to choose instead.
+ *
+ * Of course, non-timer wakeup sources are more important in some use cases and
+ * they can be covered by taking a few most recent idle time intervals of the
+ * CPU into account. However, even in that case it is not necessary to consider
+ * idle duration values greater than the time till the closest timer, as the
+ * patterns that they may belong to produce average values close enough to
+ * the time till the closest timer (sleep length) anyway.
+ *
+ * Thus this governor estimates whether or not the upcoming idle time of the CPU
+ * is likely to be significantly shorter than the sleep length and selects an
+ * idle state for it in accordance with that, as follows:
+ *
+ * - Find an idle state on the basis of the sleep length and state statistics
+ * collected over time:
+ *
+ * o Find the deepest idle state whose target residency is less than or equal
+ * to the sleep length.
+ *
+ * o Select it if it matched both the sleep length and the observed idle
+ * duration in the past more often than it matched the sleep length alone
+ * (i.e. the observed idle duration was significantly shorter than the sleep
+ * length matched by it).
+ *
+ * o Otherwise, select the shallower state with the greatest matched "early"
+ * wakeups metric.
+ *
+ * - If the majority of the most recent idle duration values are below the
+ * target residency of the idle state selected so far, use those values to
+ * compute the new expected idle duration and find an idle state matching it
+ * (which has to be shallower than the one selected so far).
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/sched/clock.h>
+#include <linux/tick.h>
+
+/*
+ * The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
+ * is used for decreasing metrics on a regular basis.
+ */
+#define PULSE 1024
+#define DECAY_SHIFT 3
+
+/*
+ * Number of the most recent idle duration values to take into consideration for
+ * the detection of wakeup patterns.
+ */
+#define INTERVALS 8
+
+/**
+ * struct teo_idle_state - Idle state data used by the TEO cpuidle governor.
+ * @early_hits: "Early" CPU wakeups "matching" this state.
+ * @hits: "On time" CPU wakeups "matching" this state.
+ * @misses: CPU wakeups "missing" this state.
+ *
+ * A CPU wakeup is "matched" by a given idle state if the idle duration measured
+ * after the wakeup is between the target residency of that state and the target
+ * residency of the next one (or if this is the deepest available idle state, it
+ * "matches" a CPU wakeup when the measured idle duration is at least equal to
+ * its target residency).
+ *
+ * Also, from the TEO governor perspective, a CPU wakeup from idle is "early" if
+ * it occurs significantly earlier than the closest expected timer event (that
+ * is, early enough to match an idle state shallower than the one matching the
+ * time till the closest timer event). Otherwise, the wakeup is "on time", or
+ * it is a "hit".
+ *
+ * A "miss" occurs when the given state doesn't match the wakeup, but it matches
+ * the time till the closest timer event used for idle state selection.
+ */
+struct teo_idle_state {
+ unsigned int early_hits;
+ unsigned int hits;
+ unsigned int misses;
+};
+
+/**
+ * struct teo_cpu - CPU data used by the TEO cpuidle governor.
+ * @time_span_ns: Time between idle state selection and post-wakeup update.
+ * @sleep_length_ns: Time till the closest timer event (at the selection time).
+ * @states: Idle states data corresponding to this CPU.
+ * @last_state: Idle state entered by the CPU last time.
+ * @interval_idx: Index of the most recent saved idle interval.
+ * @intervals: Saved idle duration values.
+ */
+struct teo_cpu {
+ u64 time_span_ns;
+ u64 sleep_length_ns;
+ struct teo_idle_state states[CPUIDLE_STATE_MAX];
+ int last_state;
+ int interval_idx;
+ unsigned int intervals[INTERVALS];
+};
+
+static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
+
+/**
+ * teo_update - Update CPU data after wakeup.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ */
+static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);
+ int i, idx_hit = -1, idx_timer = -1;
+ unsigned int measured_us;
+
+ if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
+ /*
+ * One of the safety nets has triggered or this was a timer
+ * wakeup (or equivalent).
+ */
+ measured_us = sleep_length_us;
+ } else {
+ unsigned int lat = drv->states[cpu_data->last_state].exit_latency;
+
+ measured_us = ktime_to_us(cpu_data->time_span_ns);
+ /*
+ * The delay between the wakeup and the first instruction
+ * executed by the CPU is not likely to be worst-case every
+ * time, so take 1/2 of the exit latency as a very rough
+ * approximation of the average of it.
+ */
+ if (measured_us >= lat)
+ measured_us -= lat / 2;
+ else
+ measured_us /= 2;
+ }
+
+ /*
+ * Decay the "early hits" metric for all of the states and find the
+ * states matching the sleep length and the measured idle duration.
+ */
+ for (i = 0; i < drv->state_count; i++) {
+ unsigned int early_hits = cpu_data->states[i].early_hits;
+
+ cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
+
+ if (drv->states[i].target_residency <= sleep_length_us) {
+ idx_timer = i;
+ if (drv->states[i].target_residency <= measured_us)
+ idx_hit = i;
+ }
+ }
+
+ /*
+ * Update the "hits" and "misses" data for the state matching the sleep
+ * length. If it matches the measured idle duration too, this is a hit,
+ * so increase the "hits" metric for it then. Otherwise, this is a
+ * miss, so increase the "misses" metric for it. In the latter case
+ * also increase the "early hits" metric for the state that actually
+ * matches the measured idle duration.
+ */
+ if (idx_timer >= 0) {
+ unsigned int hits = cpu_data->states[idx_timer].hits;
+ unsigned int misses = cpu_data->states[idx_timer].misses;
+
+ hits -= hits >> DECAY_SHIFT;
+ misses -= misses >> DECAY_SHIFT;
+
+ if (idx_timer > idx_hit) {
+ misses += PULSE;
+ if (idx_hit >= 0)
+ cpu_data->states[idx_hit].early_hits += PULSE;
+ } else {
+ hits += PULSE;
+ }
+
+ cpu_data->states[idx_timer].misses = misses;
+ cpu_data->states[idx_timer].hits = hits;
+ }
+
+ /*
+ * If the total time span between idle state selection and the "reflect"
+ * callback is greater than or equal to the sleep length determined at
+ * the idle state selection time, the wakeup is likely to be due to a
+ * timer event.
+ */
+ if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns)
+ measured_us = UINT_MAX;
+
+ /*
+ * Save idle duration values corresponding to non-timer wakeups for
+ * pattern detection.
+ */
+ cpu_data->intervals[cpu_data->interval_idx++] = measured_us;
+	if (cpu_data->interval_idx >= INTERVALS)
+ cpu_data->interval_idx = 0;
+}
+
+/**
+ * teo_find_shallower_state - Find shallower idle state matching given duration.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ * @state_idx: Index of the capping idle state.
+ * @duration_us: Idle duration value to match.
+ */
+static int teo_find_shallower_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int state_idx,
+ unsigned int duration_us)
+{
+ int i;
+
+ for (i = state_idx - 1; i >= 0; i--) {
+ if (drv->states[i].disabled || dev->states_usage[i].disable)
+ continue;
+
+ state_idx = i;
+ if (drv->states[i].target_residency <= duration_us)
+ break;
+ }
+ return state_idx;
+}
+
+/**
+ * teo_select - Selects the next idle state to enter.
+ * @drv: cpuidle driver containing state data.
+ * @dev: Target CPU.
+ * @stop_tick: Indication on whether or not to stop the scheduler tick.
+ */
+static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ bool *stop_tick)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ int latency_req = cpuidle_governor_latency_req(dev->cpu);
+ unsigned int duration_us, count;
+ int max_early_idx, idx, i;
+ ktime_t delta_tick;
+
+ if (cpu_data->last_state >= 0) {
+ teo_update(drv, dev);
+ cpu_data->last_state = -1;
+ }
+
+ cpu_data->time_span_ns = local_clock();
+
+ cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick);
+ duration_us = ktime_to_us(cpu_data->sleep_length_ns);
+
+ count = 0;
+ max_early_idx = -1;
+ idx = -1;
+
+ for (i = 0; i < drv->state_count; i++) {
+ struct cpuidle_state *s = &drv->states[i];
+ struct cpuidle_state_usage *su = &dev->states_usage[i];
+
+ if (s->disabled || su->disable) {
+ /*
+ * If the "early hits" metric of a disabled state is
+ * greater than the current maximum, it should be taken
+ * into account, because it would be a mistake to select
+ * a deeper state with lower "early hits" metric. The
+ * index cannot be changed to point to it, however, so
+ * just increase the max count alone and let the index
+ * still point to a shallower idle state.
+ */
+ if (max_early_idx >= 0 &&
+ count < cpu_data->states[i].early_hits)
+ count = cpu_data->states[i].early_hits;
+
+ continue;
+ }
+
+ if (idx < 0)
+ idx = i; /* first enabled state */
+
+ if (s->target_residency > duration_us)
+ break;
+
+ if (s->exit_latency > latency_req) {
+ /*
+ * If we break out of the loop for latency reasons, use
+ * the target residency of the selected state as the
+ * expected idle duration to avoid stopping the tick
+ * as long as that target residency is low enough.
+ */
+ duration_us = drv->states[idx].target_residency;
+ goto refine;
+ }
+
+ idx = i;
+
+ if (count < cpu_data->states[i].early_hits &&
+ !(tick_nohz_tick_stopped() &&
+ drv->states[i].target_residency < TICK_USEC)) {
+ count = cpu_data->states[i].early_hits;
+ max_early_idx = i;
+ }
+ }
+
+ /*
+ * If the "hits" metric of the idle state matching the sleep length is
+ * greater than its "misses" metric, that is the one to use. Otherwise,
+ * it is more likely that one of the shallower states will match the
+ * idle duration observed after wakeup, so take the one with the maximum
+ * "early hits" metric, but if that cannot be determined, just use the
+ * state selected so far.
+ */
+ if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses &&
+ max_early_idx >= 0) {
+ idx = max_early_idx;
+ duration_us = drv->states[idx].target_residency;
+ }
+
+refine:
+ if (idx < 0) {
+ idx = 0; /* No states enabled. Must use 0. */
+ } else if (idx > 0) {
+ u64 sum = 0;
+
+ count = 0;
+
+		/*
+		 * Count and sum the most recent idle duration values less than
+		 * the target residency of the state selected so far.
+		 */
+ for (i = 0; i < INTERVALS; i++) {
+ unsigned int val = cpu_data->intervals[i];
+
+ if (val >= drv->states[idx].target_residency)
+ continue;
+
+ count++;
+ sum += val;
+ }
+
+ /*
+ * Give up unless the majority of the most recent idle duration
+ * values are in the interesting range.
+ */
+ if (count > INTERVALS / 2) {
+ unsigned int avg_us = div64_u64(sum, count);
+
+ /*
+ * Avoid spending too much time in an idle state that
+ * would be too shallow.
+ */
+ if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
+ idx = teo_find_shallower_state(drv, dev, idx, avg_us);
+ duration_us = avg_us;
+ }
+ }
+ }
+
+ /*
+ * Don't stop the tick if the selected state is a polling one or if the
+ * expected idle duration is shorter than the tick period length.
+ */
+ if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+ duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
+ unsigned int delta_tick_us = ktime_to_us(delta_tick);
+
+ *stop_tick = false;
+
+ /*
+ * The tick is not going to be stopped, so if the target
+ * residency of the state to be returned is not within the time
+ * till the closest timer including the tick, try to correct
+ * that.
+ */
+ if (idx > 0 && drv->states[idx].target_residency > delta_tick_us)
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us);
+ }
+
+ return idx;
+}
+
+/**
+ * teo_reflect - Note that governor data for the CPU need to be updated.
+ * @dev: Target CPU.
+ * @state: Entered state.
+ */
+static void teo_reflect(struct cpuidle_device *dev, int state)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+
+ cpu_data->last_state = state;
+ /*
+ * If the wakeup was not "natural", but triggered by one of the safety
+ * nets, assume that the CPU might have been idle for the entire sleep
+ * length time.
+ */
+ if (dev->poll_time_limit ||
+ (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
+ dev->poll_time_limit = false;
+ cpu_data->time_span_ns = cpu_data->sleep_length_ns;
+ } else {
+ cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns;
+ }
+}
+
+/**
+ * teo_enable_device - Initialize the governor's data for the target CPU.
+ * @drv: cpuidle driver (not used).
+ * @dev: Target CPU.
+ */
+static int teo_enable_device(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ int i;
+
+ memset(cpu_data, 0, sizeof(*cpu_data));
+
+ for (i = 0; i < INTERVALS; i++)
+ cpu_data->intervals[i] = UINT_MAX;
+
+ return 0;
+}
+
+static struct cpuidle_governor teo_governor = {
+ .name = "teo",
+ .rating = 19,
+ .enable = teo_enable_device,
+ .select = teo_select,
+ .reflect = teo_reflect,
+};
+
+static int __init teo_governor_init(void)
+{
+ return cpuidle_register_governor(&teo_governor);
+}
+
+postcore_initcall(teo_governor_init);
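
To make the PULSE/DECAY_SHIFT bookkeeping above concrete: a metric that is pulsed on every wakeup converges to PULSE << DECAY_SHIFT, while one that stops being pulsed loses one eighth per wakeup. A minimal sketch:

static unsigned int decay_and_pulse(unsigned int metric, bool matched)
{
	metric -= metric >> DECAY_SHIFT;	/* shed 1/8 every wakeup */
	if (matched)
		metric += PULSE;		/* reinforce on a match */
	return metric;
}

/* Pulsed every time, starting from 0:
 *	0 -> 1024 -> 1920 -> 2704 -> ... -> 8192 (fixed point, since
 *	8192 - 8192/8 + 1024 = 8192, i.e. PULSE << DECAY_SHIFT).
 */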
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 017fc602a10e..cf7c66bb3ed9 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -5,6 +5,7 @@
*/
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
#include "i915_drv.h"
@@ -478,7 +479,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
* counter value.
*/
spin_lock_irqsave(&i915->pmu.lock, flags);
- spin_lock(&kdev->power.lock);
/*
* After the above branch intel_runtime_pm_get_if_in_use failed
@@ -491,16 +491,13 @@ static u64 get_rc6(struct drm_i915_private *i915)
* suspended and if not we cannot do better than report the last
* known RC6 value.
*/
- if (kdev->power.runtime_status == RPM_SUSPENDED) {
- if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- i915->pmu.suspended_jiffies_last =
- kdev->power.suspended_jiffies;
+ if (pm_runtime_status_suspended(kdev)) {
+ val = pm_runtime_suspended_time(kdev);
- val = kdev->power.suspended_jiffies -
- i915->pmu.suspended_jiffies_last;
- val += jiffies - kdev->power.accounting_timestamp;
+ if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ i915->pmu.suspended_time_last = val;
- val = jiffies_to_nsecs(val);
+ val -= i915->pmu.suspended_time_last;
val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
@@ -510,7 +507,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
}
- spin_unlock(&kdev->power.lock);
spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index b3728c5f13e7..4fc4f2478301 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -97,9 +97,9 @@ struct i915_pmu {
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
/**
- * @suspended_jiffies_last: Cached suspend time from PM core.
+ * @suspended_time_last: Cached suspend time from PM core.
*/
- unsigned long suspended_jiffies_last;
+ u64 suspended_time_last;
/**
* @i915_attr: Memory block holding device attributes.
*/
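
The i915 change above works because pm_runtime_suspended_time() exposes a monotonically increasing, ns-resolution counter of time spent runtime-suspended, so the PMU can snapshot it once and report deltas instead of reconstructing the value from dev->power internals under power.lock. A sketch of that pattern (local variable names illustrative):

	u64 val = pm_runtime_suspended_time(kdev);	/* total ns suspended */

	if (!baseline_taken) {
		suspended_time_last = val;		/* one-time baseline */
		baseline_taken = true;
	}

	val -= suspended_time_last;	/* ns suspended since the baseline */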
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 8b5d85c91e9d..b8647b5c3d4d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1103,6 +1103,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt),
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt),
INTEL_CPU_FAM6(ATOM_GOLDMONT_X, idle_cpu_dnv),
+ INTEL_CPU_FAM6(ATOM_TREMONT_X, idle_cpu_dnv),
{}
};
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e06a0ab05ad6..d7f97167cac3 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -551,9 +551,8 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
return ret;
}
-static inline int
-_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
- unsigned long old_freq, unsigned long freq)
+static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+ unsigned long freq)
{
int ret;
@@ -590,7 +589,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
}
/* Change frequency */
- ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
+ ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
if (ret)
goto restore_voltage;
@@ -604,7 +603,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
return 0;
restore_freq:
- if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
+ if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
__func__, old_freq);
restore_voltage:
@@ -777,7 +776,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
opp->supplies);
} else {
/* Only frequency scaling */
- ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+ ret = _generic_set_opp_clk_only(dev, clk, freq);
}
/* Scaling down? Configure required OPPs after frequency */
@@ -811,7 +810,6 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
struct opp_table *opp_table)
{
struct opp_device *opp_dev;
- int ret;
opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
if (!opp_dev)
@@ -823,10 +821,7 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
list_add(&opp_dev->node, &opp_table->dev_list);
/* Create debugfs entries for the opp_table */
- ret = opp_debug_register(opp_dev, opp_table);
- if (ret)
- dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
- __func__, ret);
+ opp_debug_register(opp_dev, opp_table);
return opp_dev;
}
@@ -1247,10 +1242,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
new_opp->opp_table = opp_table;
kref_init(&new_opp->kref);
- ret = opp_debug_create_one(new_opp, opp_table);
- if (ret)
- dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
- __func__, ret);
+ opp_debug_create_one(new_opp, opp_table);
if (!_opp_supported_by_regulators(new_opp, opp_table)) {
new_opp->available = false;
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index e6828e5f81b0..a1c57fe14de4 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -35,7 +35,7 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
debugfs_remove_recursive(opp->dentry);
}
-static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
+static void opp_debug_create_supplies(struct dev_pm_opp *opp,
struct opp_table *opp_table,
struct dentry *pdentry)
{
@@ -50,30 +50,21 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
/* Create per-opp directory */
d = debugfs_create_dir(name, pdentry);
- if (!d)
- return false;
+ debugfs_create_ulong("u_volt_target", S_IRUGO, d,
+ &opp->supplies[i].u_volt);
- if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d,
- &opp->supplies[i].u_volt))
- return false;
+ debugfs_create_ulong("u_volt_min", S_IRUGO, d,
+ &opp->supplies[i].u_volt_min);
- if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d,
- &opp->supplies[i].u_volt_min))
- return false;
+ debugfs_create_ulong("u_volt_max", S_IRUGO, d,
+ &opp->supplies[i].u_volt_max);
- if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d,
- &opp->supplies[i].u_volt_max))
- return false;
-
- if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
- &opp->supplies[i].u_amp))
- return false;
+ debugfs_create_ulong("u_amp", S_IRUGO, d,
+ &opp->supplies[i].u_amp);
}
-
- return true;
}
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
+void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
{
struct dentry *pdentry = opp_table->dentry;
struct dentry *d;
@@ -95,40 +86,23 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
/* Create per-opp directory */
d = debugfs_create_dir(name, pdentry);
- if (!d)
- return -ENOMEM;
-
- if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available))
- return -ENOMEM;
-
- if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic))
- return -ENOMEM;
-
- if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo))
- return -ENOMEM;
-
- if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
- return -ENOMEM;
-
- if (!debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate))
- return -ENOMEM;
- if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
- return -ENOMEM;
+ debugfs_create_bool("available", S_IRUGO, d, &opp->available);
+ debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic);
+ debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
+ debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
+ debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
+ debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate);
+ debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
+ &opp->clock_latency_ns);
- if (!opp_debug_create_supplies(opp, opp_table, d))
- return -ENOMEM;
-
- if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
- &opp->clock_latency_ns))
- return -ENOMEM;
+ opp_debug_create_supplies(opp, opp_table, d);
opp->dentry = d;
- return 0;
}
-static int opp_list_debug_create_dir(struct opp_device *opp_dev,
- struct opp_table *opp_table)
+static void opp_list_debug_create_dir(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
const struct device *dev = opp_dev->dev;
struct dentry *d;
@@ -137,36 +111,21 @@ static int opp_list_debug_create_dir(struct opp_device *opp_dev,
/* Create device specific directory */
d = debugfs_create_dir(opp_table->dentry_name, rootdir);
- if (!d) {
- dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
- return -ENOMEM;
- }
opp_dev->dentry = d;
opp_table->dentry = d;
-
- return 0;
}
-static int opp_list_debug_create_link(struct opp_device *opp_dev,
- struct opp_table *opp_table)
+static void opp_list_debug_create_link(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- const struct device *dev = opp_dev->dev;
char name[NAME_MAX];
- struct dentry *d;
opp_set_dev_name(opp_dev->dev, name);
/* Create device specific directory link */
- d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
- if (!d) {
- dev_err(dev, "%s: Failed to create link\n", __func__);
- return -ENOMEM;
- }
-
- opp_dev->dentry = d;
-
- return 0;
+ opp_dev->dentry = debugfs_create_symlink(name, rootdir,
+ opp_table->dentry_name);
}
/**
@@ -177,20 +136,13 @@ static int opp_list_debug_create_link(struct opp_device *opp_dev,
* Dynamically adds device specific directory in debugfs 'opp' directory. If the
* device-opp is shared with other devices, then links will be created for all
* devices except the first.
- *
- * Return: 0 on success, otherwise negative error.
*/
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
+void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
{
- if (!rootdir) {
- pr_debug("%s: Uninitialized rootdir\n", __func__);
- return -EINVAL;
- }
-
if (opp_table->dentry)
- return opp_list_debug_create_link(opp_dev, opp_table);
-
- return opp_list_debug_create_dir(opp_dev, opp_table);
+ opp_list_debug_create_link(opp_dev, opp_table);
+ else
+ opp_list_debug_create_dir(opp_dev, opp_table);
}
static void opp_migrate_dentry(struct opp_device *opp_dev,
@@ -252,10 +204,6 @@ static int __init opp_debug_init(void)
{
/* Create /sys/kernel/debug/opp directory */
rootdir = debugfs_create_dir("opp", NULL);
- if (!rootdir) {
- pr_err("%s: Failed to create root directory\n", __func__);
- return -ENOMEM;
- }
return 0;
}
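
The conversion above follows the debugfs convention that creation failures are not fatal: callers are expected to ignore the returned dentries, so the helpers can become void. A minimal sketch of the resulting shape (struct and fields invented):

static void foo_debug_register(struct dentry *parent, struct foo *f)
{
	struct dentry *d = debugfs_create_dir("foo", parent);

	/* Return values deliberately ignored; debugfs failures are non-fatal. */
	debugfs_create_bool("available", 0444, d, &f->available);
	debugfs_create_ulong("rate_hz", 0444, d, &f->rate);
}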
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 1779f2c93291..62504b18f198 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -20,6 +20,7 @@
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/energy_model.h>
#include "opp.h"
@@ -1049,3 +1050,101 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
return of_node_get(opp->np);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
+
+/*
+ * Callback function provided to the Energy Model framework upon registration.
+ * This computes the power estimated by @CPU at @kHz if it is the frequency
+ * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
+ * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
+ * frequency and @mW to the associated power. The power is estimated as
+ * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively
+ * the voltage and frequency of the OPP.
+ *
+ * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power
+ * calculation failed because of missing parameters, 0 otherwise.
+ */
+static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
+ int cpu)
+{
+ struct device *cpu_dev;
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ unsigned long mV, Hz;
+ u32 cap;
+ u64 tmp;
+ int ret;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np)
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+ of_node_put(np);
+ if (ret)
+ return -EINVAL;
+
+ Hz = *kHz * 1000;
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+ if (IS_ERR(opp))
+ return -EINVAL;
+
+ mV = dev_pm_opp_get_voltage(opp) / 1000;
+ dev_pm_opp_put(opp);
+ if (!mV)
+ return -EINVAL;
+
+ tmp = (u64)cap * mV * mV * (Hz / 1000000);
+ do_div(tmp, 1000000000);
+
+ *mW = (unsigned long)tmp;
+ *kHz = Hz / 1000;
+
+ return 0;
+}
+
+/**
+ * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
+ * @cpus : CPUs for which an Energy Model has to be registered
+ *
+ * This checks whether the "dynamic-power-coefficient" devicetree property has
+ * been specified, and tries to register an Energy Model with it if it has.
+ */
+void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power);
+ int ret, nr_opp, cpu = cpumask_first(cpus);
+ struct device *cpu_dev;
+ struct device_node *np;
+ u32 cap;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return;
+
+ nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+ if (nr_opp <= 0)
+ return;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np)
+ return;
+
+ /*
+ * Register an EM only if the 'dynamic-power-coefficient' property is
+ * set in devicetree. It is assumed the voltage values are known if that
+ * property is set since it is useless otherwise. If voltages are not
+ * known, just let the EM registration fail with an error to alert the
+ * user about the inconsistent configuration.
+ */
+ ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+ of_node_put(np);
+ if (ret || !cap)
+ return;
+
+ em_register_perf_domain(cpus, nr_opp, &em_cb);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
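
A worked instance of the P = C * V^2 * f computation in _get_cpu_power(), with invented numbers: dynamic-power-coefficient cap = 105 (uW/MHz/V^2) and an OPP at 1.6 GHz / 1150 mV:

	/* tmp = (u64)cap * mV * mV * (Hz / 1000000)
	 *     = 105 * 1150 * 1150 * 1600
	 *     = 222180000000
	 * do_div(tmp, 1000000000)  ->  tmp = 222
	 * so *mW = 222 and *kHz = 1600000 after the ceiling lookup.
	 */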
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 4458175aa661..569b3525aa67 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -238,18 +238,17 @@ static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
#ifdef CONFIG_DEBUG_FS
void opp_debug_remove_one(struct dev_pm_opp *opp);
-int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
-int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
#else
static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
-static inline int opp_debug_create_one(struct dev_pm_opp *opp,
- struct opp_table *opp_table)
-{ return 0; }
-static inline int opp_debug_register(struct opp_device *opp_dev,
- struct opp_table *opp_table)
-{ return 0; }
+static inline void opp_debug_create_one(struct dev_pm_opp *opp,
+ struct opp_table *opp_table) { }
+
+static inline void opp_debug_register(struct opp_device *opp_dev,
+ struct opp_table *opp_table) { }
static inline void opp_debug_unregister(struct opp_device *opp_dev,
struct opp_table *opp_table)
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 6cdb2c14eee4..4347f15165f8 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1156,6 +1156,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(KABYLAKE_MOBILE, rapl_defaults_core),
INTEL_CPU_FAM6(KABYLAKE_DESKTOP, rapl_defaults_core),
INTEL_CPU_FAM6(CANNONLAKE_MOBILE, rapl_defaults_core),
+ INTEL_CPU_FAM6(ICELAKE_MOBILE, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
@@ -1164,6 +1165,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_X, rapl_defaults_core),
+ INTEL_CPU_FAM6(ATOM_TREMONT_X, rapl_defaults_core),
INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 30323426902e..58bb7d72dc2b 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -152,6 +152,7 @@ config CPU_THERMAL
bool "generic cpu cooling support"
depends on CPU_FREQ
depends on THERMAL_OF
+ depends on THERMAL=y
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists