Diffstat (limited to 'drivers/cpuidle/driver.c')
-rw-r--r--  drivers/cpuidle/driver.c  150
 1 file changed, 98 insertions(+), 52 deletions(-)
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index dc32f34e68d9..370664c47e65 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -8,6 +8,8 @@
* This code is licenced under the GPL.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -16,6 +18,7 @@
#include <linux/cpumask.h>
#include <linux/tick.h>
#include <linux/cpu.h>
+#include <linux/math64.h>
#include "cpuidle.h"
@@ -62,24 +65,23 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
* __cpuidle_set_driver - set per CPU driver variables for the given driver.
* @drv: a valid pointer to a struct cpuidle_driver
*
- * For each CPU in the driver's cpumask, unset the registered driver per CPU
- * to @drv.
- *
- * Returns 0 on success, -EBUSY if the CPUs have driver(s) already.
+ * Returns 0 on success, -EBUSY if any CPU in the cpumask already has a
+ * driver different from @drv.
*/
static inline int __cpuidle_set_driver(struct cpuidle_driver *drv)
{
int cpu;
for_each_cpu(cpu, drv->cpumask) {
+ struct cpuidle_driver *old_drv;
- if (__cpuidle_get_cpu_driver(cpu)) {
- __cpuidle_unset_driver(drv);
+ old_drv = __cpuidle_get_cpu_driver(cpu);
+ if (old_drv && old_drv != drv)
return -EBUSY;
- }
+ }
+ for_each_cpu(cpu, drv->cpumask)
per_cpu(cpuidle_drivers, cpu) = drv;
- }
return 0;
}
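
With the check now split from the assignment, -EBUSY is only returned when some
CPU is already owned by a different driver; registering the same driver again
for the same CPUs is harmless. A minimal sketch of the caller side, assuming a
hypothetical "foo" platform driver (the names, the trivial enter callback and
the single state are illustrative only, not an in-tree driver):

#include <linux/cpuidle.h>
#include <linux/module.h>

/* Placeholder enter callback: a real driver would architecturally idle here. */
static int foo_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv,
		     int index)
{
	return index;
}

static struct cpuidle_driver foo_idle_driver = {
	.name		= "foo_idle",
	.owner		= THIS_MODULE,
	.states		= {
		{
			.name			= "foo-wfi",
			.desc			= "shallowest foo state",
			.exit_latency		= 1,	/* us */
			.target_residency	= 1,	/* us */
			.enter			= foo_enter,
		},
	},
	.state_count	= 1,
	/* .cpumask left NULL: __cpuidle_driver_init() falls back to cpu_possible_mask */
};

static int __init foo_idle_init(void)
{
	int ret = cpuidle_register_driver(&foo_idle_driver);

	/* -EBUSY now means another cpuidle driver already owns one of the CPUs. */
	if (ret == -EBUSY)
		pr_err("foo_idle: CPUs claimed by another cpuidle driver\n");

	return ret;
}
module_init(foo_idle_init);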
@@ -156,8 +158,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
int i;
- drv->refcnt = 0;
-
/*
* Use all possible CPUs as the default, because if the kernel boots
* with some CPUs offline and then we online one of them, the CPU
@@ -166,16 +166,43 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
if (!drv->cpumask)
drv->cpumask = (struct cpumask *)cpu_possible_mask;
- /*
- * Look for the timer stop flag in the different states, so that we know
- * if the broadcast timer has to be set up. The loop is in the reverse
- * order, because usually one of the deeper states have this flag set.
- */
- for (i = drv->state_count - 1; i >= 0 ; i--) {
- if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) {
+ for (i = 0; i < drv->state_count; i++) {
+ struct cpuidle_state *s = &drv->states[i];
+
+ /*
+ * Look for the timer stop flag in the different states and if
+ * it is found, indicate that the broadcast timer has to be set
+ * up.
+ */
+ if (s->flags & CPUIDLE_FLAG_TIMER_STOP)
drv->bctimer = 1;
- break;
- }
+
+ /*
+ * The core will use the target residency and exit latency
+ * values in nanoseconds, but allow drivers to provide them in
+ * microseconds too.
+ */
+ if (s->target_residency > 0)
+ s->target_residency_ns = s->target_residency * NSEC_PER_USEC;
+ else if (s->target_residency_ns < 0)
+ s->target_residency_ns = 0;
+ else
+ s->target_residency = div_u64(s->target_residency_ns, NSEC_PER_USEC);
+
+ if (s->exit_latency > 0)
+ s->exit_latency_ns = mul_u32_u32(s->exit_latency, NSEC_PER_USEC);
+ else if (s->exit_latency_ns < 0)
+ s->exit_latency_ns = 0;
+ else
+ s->exit_latency = div_u64(s->exit_latency_ns, NSEC_PER_USEC);
+
+ /*
+ * Warn if the exit latency of a CPU idle state exceeds its target
+ * residency, because cpuidle assumes in multiple places that this
+ * never happens.
+ */
+ if (s->exit_latency_ns > s->target_residency_ns)
+ pr_warn("Idle state %d target residency too low\n", i);
}
}
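
Hedged illustration of the two ways a state's timing can now be specified,
reusing the hypothetical foo_enter() stub from the sketch above: whichever
representation the driver omits is derived by the loop above, and the core
warns when exit_latency_ns ends up greater than target_residency_ns.

static struct cpuidle_state foo_states[] = {
	{	/* times given in microseconds; the core derives the *_ns fields */
		.name			= "foo-shallow",
		.desc			= "clock gated",
		.exit_latency		= 2,		/* us */
		.target_residency	= 10,		/* us */
		.enter			= foo_enter,
	},
	{	/* times given in nanoseconds; the core derives the microsecond fields */
		.name			= "foo-deep",
		.desc			= "power gated, local timer stops",
		.exit_latency_ns	= 50 * NSEC_PER_USEC,
		.target_residency_ns	= 500 * NSEC_PER_USEC,
		.flags			= CPUIDLE_FLAG_TIMER_STOP,	/* sets drv->bctimer */
		.enter			= foo_enter,
	},
};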
@@ -230,9 +257,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
*/
static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (WARN_ON(drv->refcnt > 0))
- return;
-
if (drv->bctimer) {
drv->bctimer = 0;
on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
@@ -247,19 +271,32 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
* @drv: a pointer to a valid struct cpuidle_driver
*
* Register the driver under a lock to prevent concurrent attempts to
- * [un]register the driver from occuring at the same time.
+ * [un]register the driver from occurring at the same time.
*
* Returns 0 on success, a negative error code (returned by
* __cpuidle_register_driver()) otherwise.
*/
int cpuidle_register_driver(struct cpuidle_driver *drv)
{
+ struct cpuidle_governor *gov;
int ret;
spin_lock(&cpuidle_driver_lock);
ret = __cpuidle_register_driver(drv);
spin_unlock(&cpuidle_driver_lock);
+ if (!ret && !strlen(param_governor) && drv->governor &&
+ (cpuidle_get_driver() == drv)) {
+ mutex_lock(&cpuidle_lock);
+ gov = cpuidle_find_governor(drv->governor);
+ if (gov) {
+ cpuidle_prev_governor = cpuidle_curr_governor;
+ if (cpuidle_switch_governor(gov) < 0)
+ cpuidle_prev_governor = NULL;
+ }
+ mutex_unlock(&cpuidle_lock);
+ }
+
return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register_driver);
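
A driver can now name the governor it would like the core to switch to when it
registers; the user's cpuidle.governor= command-line choice (param_governor)
still takes precedence, and the previous governor is saved so it can be
restored later. A hedged sketch extending the hypothetical foo_idle_driver from
above (the in-tree haltpoll driver uses this mechanism in the same way):

static struct cpuidle_driver foo_idle_driver = {
	.name		= "foo_idle",
	.owner		= THIS_MODULE,
	/* Preferred governor; ignored if cpuidle.governor= was passed at boot. */
	.governor	= "menu",
	/* .states / .state_count as in the earlier sketch */
};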
@@ -269,14 +306,26 @@ EXPORT_SYMBOL_GPL(cpuidle_register_driver);
* @drv: a pointer to a valid struct cpuidle_driver
*
* Unregisters the cpuidle driver under a lock to prevent concurrent attempts
- * to [un]register the driver from occuring at the same time. @drv has to
+ * to [un]register the driver from occurring at the same time. @drv has to
* match the currently registered driver.
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
+ bool enabled = (cpuidle_get_driver() == drv);
+
spin_lock(&cpuidle_driver_lock);
__cpuidle_unregister_driver(drv);
spin_unlock(&cpuidle_driver_lock);
+
+ if (!enabled)
+ return;
+
+ mutex_lock(&cpuidle_lock);
+ if (cpuidle_prev_governor) {
+ if (!cpuidle_switch_governor(cpuidle_prev_governor))
+ cpuidle_prev_governor = NULL;
+ }
+ mutex_unlock(&cpuidle_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
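
The matching teardown needs nothing new from the driver; unregistering also
puts the governor back. A short sketch, continuing the hypothetical foo_idle
module from above:

static void __exit foo_idle_exit(void)
{
	/*
	 * Drops the per-CPU driver pointers, tears down the broadcast timer
	 * if one was set up, and switches back to the governor that was in
	 * use before foo_idle_init() registered this driver.
	 */
	cpuidle_unregister_driver(&foo_idle_driver);
}
module_exit(foo_idle_exit);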
@@ -315,42 +364,39 @@ struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
/**
- * cpuidle_driver_ref - get a reference to the driver.
- *
- * Increment the reference counter of the cpuidle driver associated with
- * the current CPU.
- *
- * Returns a pointer to the driver, or NULL if the current CPU has no driver.
+ * cpuidle_driver_state_disabled - Disable or enable an idle state
+ * @drv: cpuidle driver owning the state
+ * @idx: State index
+ * @disable: Whether or not to disable the state
*/
-struct cpuidle_driver *cpuidle_driver_ref(void)
+void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+ bool disable)
{
- struct cpuidle_driver *drv;
+ unsigned int cpu;
- spin_lock(&cpuidle_driver_lock);
+ mutex_lock(&cpuidle_lock);
- drv = cpuidle_get_driver();
- if (drv)
- drv->refcnt++;
+ spin_lock(&cpuidle_driver_lock);
- spin_unlock(&cpuidle_driver_lock);
- return drv;
-}
+ if (!drv->cpumask) {
+ drv->states[idx].flags |= CPUIDLE_FLAG_UNUSABLE;
+ goto unlock;
+ }
-/**
- * cpuidle_driver_unref - puts down the refcount for the driver
- *
- * Decrement the reference counter of the cpuidle driver associated with
- * the current CPU.
- */
-void cpuidle_driver_unref(void)
-{
- struct cpuidle_driver *drv;
+ for_each_cpu(cpu, drv->cpumask) {
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
- spin_lock(&cpuidle_driver_lock);
+ if (!dev)
+ continue;
- drv = cpuidle_get_driver();
- if (drv && !WARN_ON(drv->refcnt <= 0))
- drv->refcnt--;
+ if (disable)
+ dev->states_usage[idx].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ else
+ dev->states_usage[idx].disable &= ~CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ }
+unlock:
spin_unlock(&cpuidle_driver_lock);
+
+ mutex_unlock(&cpuidle_lock);
}
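
The new cpuidle_driver_state_disabled() helper lets a driver disable or
re-enable one of its states after registration: it sets or clears
CPUIDLE_STATE_DISABLED_BY_DRIVER on every registered cpuidle device in the
driver's cpumask, and flags the state CPUIDLE_FLAG_UNUSABLE instead if the
driver has not been registered yet (drv->cpumask still NULL). A hedged usage
sketch; the wrapper name and the "deepest state" policy are hypothetical:

static void foo_allow_deepest_state(struct cpuidle_driver *drv, bool allow)
{
	/* Disable (or re-enable) the driver's last, deepest state everywhere. */
	cpuidle_driver_state_disabled(drv, drv->state_count - 1, !allow);
}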