Diffstat (limited to 'drivers/base/power/runtime.c')
-rw-r--r--  drivers/base/power/runtime.c | 385
1 file changed, 275 insertions(+), 110 deletions(-)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 50e726b6c2cf..84676cc24221 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
+#include <linux/rculist.h>
#include <trace/events/rpm.h>
#include "../base.h"
@@ -18,10 +19,24 @@
typedef int (*pm_callback_t)(struct device *);
+static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
+{
+ return *(pm_callback_t *)(start + offset);
+}
+
+static pm_callback_t __rpm_get_driver_callback(struct device *dev,
+ size_t cb_offset)
+{
+ if (dev->driver && dev->driver->pm)
+ return get_callback_ptr(dev->driver->pm, cb_offset);
+
+ return NULL;
+}
+
static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
- pm_callback_t cb;
const struct dev_pm_ops *ops;
+ pm_callback_t cb = NULL;
if (dev->pm_domain)
ops = &dev->pm_domain->ops;
@@ -35,12 +50,10 @@ static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
ops = NULL;
if (ops)
- cb = *(pm_callback_t *)((void *)ops + cb_offset);
- else
- cb = NULL;
+ cb = get_callback_ptr(ops, cb_offset);
- if (!cb && dev->driver && dev->driver->pm)
- cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+ if (!cb)
+ cb = __rpm_get_driver_callback(dev, cb_offset);
return cb;
}
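
The refactoring above keeps the existing offsetof()-based dispatch: a byte offset into struct dev_pm_ops is computed with offsetof() and the function pointer stored at that offset is read back. A minimal, self-contained sketch of the same technique outside the kernel (struct ops and all names below are illustrative only, not part of the patch):

#include <stddef.h>
#include <stdio.h>

struct ops {
	int (*suspend)(void *ctx);
	int (*resume)(void *ctx);
};

typedef int (*cb_t)(void *ctx);

/* Read the function pointer stored at byte offset 'off' inside 'ops'. */
static cb_t get_cb(const struct ops *ops, size_t off)
{
	return *(const cb_t *)((const char *)ops + off);
}

static int demo_resume(void *ctx)
{
	(void)ctx;
	printf("resume callback invoked\n");
	return 0;
}

int main(void)
{
	struct ops ops = { .resume = demo_resume };
	cb_t cb = get_cb(&ops, offsetof(struct ops, resume));

	return cb ? cb(NULL) : -1;
}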
@@ -77,7 +90,7 @@ static void update_pm_runtime_accounting(struct device *dev)
/*
* Because ktime_get_mono_fast_ns() is not monotonic during
* timekeeping updates, ensure that 'now' is after the last saved
- * timesptamp.
+ * timestamp.
*/
if (now < last)
return;
@@ -93,6 +106,7 @@ static void update_pm_runtime_accounting(struct device *dev)
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
update_pm_runtime_accounting(dev);
+ trace_rpm_status(dev, status);
dev->power.runtime_status = status;
}
@@ -203,7 +217,7 @@ static int dev_memalloc_noio(struct device *dev, void *data)
* resume/suspend callback of any one of its ancestors(or the
* block device itself), the deadlock may be triggered inside the
* memory allocation since it might not complete until the block
- * device becomes active and the involed page I/O finishes. The
+ * device becomes active and the involved page I/O finishes. The
* situation is pointed out first by Alan Stern. Network device
* are involved in iSCSI kind of situation.
*
@@ -288,7 +302,7 @@ static int rpm_get_suppliers(struct device *dev)
device_links_read_lock_held()) {
int retval;
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
@@ -446,8 +460,19 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
retval = __rpm_callback(cb, dev);
}
- dev->power.runtime_error = retval;
- return retval != -EACCES ? retval : -EIO;
+ /*
+ * Since -EACCES means that runtime PM is disabled for the given device,
+ * it should not be returned by runtime PM callbacks. If it is returned
+ * nevertheless, assume it to be a transient error and convert it to
+ * -EAGAIN.
+ */
+ if (retval == -EACCES)
+ retval = -EAGAIN;
+
+ if (retval != -EAGAIN && retval != -EBUSY)
+ dev->power.runtime_error = retval;
+
+ return retval;
}
/**
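
With this change, -EBUSY and -EAGAIN returned by a runtime PM callback are treated as transient and no longer latch power.runtime_error, and a stray -EACCES is converted to -EAGAIN. A driver can therefore refuse a suspend attempt without pushing the device into the error state. A hypothetical callback relying on that (the foo_* helpers are invented for illustration):

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/* A transfer is still in flight; report a transient condition. */
	if (foo_transfer_pending(priv))
		return -EBUSY;

	foo_hw_power_down(priv);
	return 0;
}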
@@ -468,11 +493,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
- trace_rpm_idle_rcuidle(dev, rpmflags);
+ trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
+ else if ((rpmflags & RPM_GET_PUT) && retval == 1)
+ ; /* put() is allowed in RPM_SUSPENDED */
+
/* Idle notifications are allowed only in the RPM_ACTIVE state. */
else if (dev->power.runtime_status != RPM_ACTIVE)
retval = -EAGAIN;
@@ -508,7 +536,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
+ trace_rpm_return_int(dev, _THIS_IP_, 0);
return 0;
}
@@ -530,7 +558,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
wake_up_all(&dev->power.wait_queue);
out:
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
@@ -562,7 +590,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- trace_rpm_suspend_rcuidle(dev, rpmflags);
+ trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -713,7 +741,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
@@ -723,21 +751,18 @@ static int rpm_suspend(struct device *dev, int rpmflags)
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ /*
+ * On transient errors, if the callback routine failed an autosuspend,
+ * and if the last_busy time has been updated so that there is a new
+ * autosuspend expiration time, automatically reschedule another
+ * autosuspend.
+ */
+ if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+
+ pm_runtime_cancel_pending(dev);
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
goto out;
}
@@ -765,7 +790,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
- trace_rpm_resume_rcuidle(dev, rpmflags);
+ trace_rpm_resume(dev, rpmflags);
repeat:
if (dev->power.runtime_error) {
@@ -774,6 +799,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.runtime_status == RPM_ACTIVE &&
dev->power.last_status == RPM_ACTIVE)
retval = 1;
+ else if (rpmflags & RPM_TRANSPARENT)
+ goto out;
else
retval = -EACCES;
}
@@ -935,7 +962,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -1001,7 +1028,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
* If 'expires' is after the current time, we've been called
* too early.
*/
- if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+ if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
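
The timer comparison fix above matters for autosuspend, where the expiration time is derived from the last_busy timestamp. For reference, the usual driver-side autosuspend pattern looks roughly like this (the 2-second delay and the placement are illustrative):

/* In probe: */
pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s of inactivity */
pm_runtime_use_autosuspend(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);

/* Around hardware accesses: */
pm_runtime_get_sync(dev);
/* ... talk to the device ... */
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);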
@@ -1091,7 +1118,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
if (retval < 0) {
return retval;
} else if (retval > 0) {
- trace_rpm_usage_rcuidle(dev, rpmflags);
+ trace_rpm_usage(dev, rpmflags);
return 0;
}
}
@@ -1129,7 +1156,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
if (retval < 0) {
return retval;
} else if (retval > 0) {
- trace_rpm_usage_rcuidle(dev, rpmflags);
+ trace_rpm_usage(dev, rpmflags);
return 0;
}
}
@@ -1175,16 +1202,18 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * pm_runtime_get_if_active - Conditionally bump up device usage counter.
+ * pm_runtime_get_conditional - Conditionally bump up device usage counter.
* @dev: Device to handle.
* @ign_usage_count: Whether or not to look at the current usage counter value.
*
* Return -EINVAL if runtime PM is disabled for @dev.
*
- * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
- * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
- * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
- * without changing the usage counter.
+ * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
+ * is set, or (2) @dev is not ignoring children and its active child count is
+ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
+ * the usage counter of @dev and return 1.
+ *
+ * Otherwise, return 0 without changing the usage counter.
*
* If @ign_usage_count is %true, this function can be used to prevent suspending
* the device when its runtime PM status is %RPM_ACTIVE.
@@ -1196,7 +1225,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
* The caller is responsible for decrementing the runtime PM usage counter of
* @dev after this function has returned a positive value for it.
*/
-int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
+static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
@@ -1206,20 +1235,58 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
retval = -EINVAL;
} else if (dev->power.runtime_status != RPM_ACTIVE) {
retval = 0;
- } else if (ign_usage_count) {
+ } else if (ign_usage_count || (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0)) {
retval = 1;
atomic_inc(&dev->power.usage_count);
} else {
retval = atomic_inc_not_zero(&dev->power.usage_count);
}
- trace_rpm_usage_rcuidle(dev, 0);
+ trace_rpm_usage(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
}
+
+/**
+ * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
+ * in active state
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE, in which case it returns 1. If the device is in a different
+ * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
+ * device, in which case also the usage_count will remain unmodified.
+ */
+int pm_runtime_get_if_active(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, true);
+}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
/**
+ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
+ * ignoring children and its active child count is nonzero. 1 is returned in
+ * this case.
+ *
+ * If @dev is in a different state or it is not in use (that is, its usage
+ * counter is 0, or it is ignoring children, or its active child count is 0),
+ * 0 is returned.
+ *
+ * -EINVAL is returned if runtime PM is disabled for the device, in which case
+ * also the usage counter of @dev is not updated.
+ */
+int pm_runtime_get_if_in_use(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, false);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+
+/**
* __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
* @status: New runtime PM status of the device.
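
After this hunk, pm_runtime_get_if_active() and pm_runtime_get_if_in_use() both take only the device pointer; the boolean selector moved into the internal pm_runtime_get_conditional(). A sketch of how a driver fast path might use the "in use" variant (the surrounding handler and foo_handle_event() are hypothetical):

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct device *dev = data;

	/* Bail out unless the device is RPM_ACTIVE and actually in use. */
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return IRQ_NONE;

	foo_handle_event(dev);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return IRQ_HANDLED;
}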
@@ -1400,47 +1467,48 @@ static void __pm_runtime_barrier(struct device *dev)
* Next, make sure that all pending requests for the device have been flushed
* from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
- *
- * Return value:
- * 1, if there was a resume request pending and the device had to be woken up,
- * 0, otherwise
*/
-int pm_runtime_barrier(struct device *dev)
+void pm_runtime_barrier(struct device *dev)
{
- int retval = 0;
-
pm_runtime_get_noresume(dev);
spin_lock_irq(&dev->power.lock);
if (dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME) {
+ && dev->power.request == RPM_REQ_RESUME)
rpm_resume(dev, 0);
- retval = 1;
- }
__pm_runtime_barrier(dev);
spin_unlock_irq(&dev->power.lock);
pm_runtime_put_noidle(dev);
-
- return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
-/**
- * __pm_runtime_disable - Disable runtime PM of a device.
- * @dev: Device to handle.
- * @check_resume: If set, check if there's a resume request for the device.
- *
- * Increment power.disable_depth for the device and if it was zero previously,
- * cancel all pending runtime PM requests for the device and wait for all
- * operations in progress to complete. The device can be either active or
- * suspended after its runtime PM has been disabled.
- *
- * If @check_resume is set and there's a resume request pending when
- * __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its runtime PM.
- */
+bool pm_runtime_block_if_disabled(struct device *dev)
+{
+ bool ret;
+
+ spin_lock_irq(&dev->power.lock);
+
+ ret = !pm_runtime_enabled(dev);
+ if (ret && dev->power.last_status == RPM_INVALID)
+ dev->power.last_status = RPM_BLOCKED;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ return ret;
+}
+
+void pm_runtime_unblock(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.last_status == RPM_BLOCKED)
+ dev->power.last_status = RPM_INVALID;
+
+ spin_unlock_irq(&dev->power.lock);
+}
+
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
spin_lock_irq(&dev->power.lock);
@@ -1499,6 +1567,10 @@ void pm_runtime_enable(struct device *dev)
if (--dev->power.disable_depth > 0)
goto out;
+ if (dev->power.last_status == RPM_BLOCKED) {
+ dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
+ dump_stack();
+ }
dev->power.last_status = RPM_INVALID;
dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
@@ -1512,6 +1584,32 @@ out:
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
+static void pm_runtime_set_suspended_action(void *data)
+{
+ pm_runtime_set_suspended(data);
+}
+
+/**
+ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_set_active_enabled(struct device *dev)
+{
+ int err;
+
+ err = pm_runtime_set_active(dev);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
+ if (err)
+ return err;
+
+ return devm_pm_runtime_enable(dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
+
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
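
The new devm_pm_runtime_set_active_enabled() bundles pm_runtime_set_active(), a devres action that sets the status back to suspended on unbind, and devm_pm_runtime_enable(). A hypothetical probe using it (foo_hw_power_up() is a placeholder):

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	foo_hw_power_up(dev);		/* device is now physically active */

	/* Mark it active and enable runtime PM; both are undone on unbind. */
	ret = devm_pm_runtime_set_active_enabled(dev);
	if (ret)
		return ret;

	return 0;
}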
@@ -1534,13 +1632,34 @@ int devm_pm_runtime_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+static void pm_runtime_put_noidle_action(void *data)
+{
+ pm_runtime_put_noidle(data);
+}
+
+/**
+ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_get_noresume(struct device *dev)
+{
+ pm_runtime_get_noresume(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
+
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
- * Increase the device's usage count and clear its power.runtime_auto flag,
- * so that it cannot be suspended at run time until pm_runtime_allow() is called
- * for it.
+ * Resume @dev if already suspended and block runtime suspend of @dev in such
+ * a way that it can be unblocked via the /sys/devices/.../power/control
+ * interface, or otherwise by calling pm_runtime_allow().
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
*/
void pm_runtime_forbid(struct device *dev)
{
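
Similarly, devm_pm_runtime_get_noresume() takes a runtime PM usage reference that is dropped automatically, via pm_runtime_put_noidle(), when the driver unbinds. A sketch of a probe holding such a reference for the lifetime of the binding (names are illustrative):

static int bar_probe(struct device *dev)
{
	int ret;

	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	/* Hold a usage reference until unbind; no resume is triggered here. */
	return devm_pm_runtime_get_noresume(dev);
}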
@@ -1561,7 +1680,13 @@ EXPORT_SYMBOL_GPL(pm_runtime_forbid);
* pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
- * Decrease the device's usage count and set its power.runtime_auto flag.
+ * Unblock runtime suspend of @dev after it has been blocked by
+ * pm_runtime_forbid() (for instance, if it has been blocked via the
+ * /sys/devices/.../power/control interface), check if @dev can be
+ * suspended and suspend it in that case.
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
*/
void pm_runtime_allow(struct device *dev)
{
@@ -1576,7 +1701,7 @@ void pm_runtime_allow(struct device *dev)
if (ret == 0)
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
else if (ret > 0)
- trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
+ trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1646,7 +1771,7 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
} else {
- trace_rpm_usage_rcuidle(dev, 0);
+ trace_rpm_usage(dev, 0);
}
}
@@ -1727,12 +1852,12 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
- dev->power.needs_force_resume = 0;
+ dev->power.needs_force_resume = false;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- dev->power.suspend_timer.function = pm_suspend_timer_fn;
+ hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1754,6 +1879,11 @@ void pm_runtime_reinit(struct device *dev)
pm_runtime_put(dev->parent);
}
}
+ /*
+ * Clear power.needs_force_resume in case it has been set by
+ * pm_runtime_force_suspend() invoked from a driver remove callback.
+ */
+ dev->power.needs_force_resume = false;
}
/**
@@ -1777,9 +1907,8 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
- if (link->flags & DL_FLAG_PM_RUNTIME) {
+ dev_for_each_link_to_supplier(link, dev)
+ if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
}
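
The DL_FLAG_PM_RUNTIME links walked here are the ones created by callers of device_link_add(); a consumer driver typically sets such a link up along these lines (the flag combination is the common one, consumer_dev/supplier_dev are placeholders):

struct device_link *link;

link = device_link_add(consumer_dev, supplier_dev,
		       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
if (!link)
	return -ENODEV;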
@@ -1833,7 +1962,7 @@ static void pm_runtime_drop_link_count(struct device *dev)
*/
void pm_runtime_drop_link(struct device_link *link)
{
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
return;
pm_runtime_drop_link_count(link->consumer);
@@ -1841,13 +1970,23 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
-static bool pm_runtime_need_not_resume(struct device *dev)
+static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
{
- return atomic_read(&dev->power.usage_count) <= 1 &&
- (atomic_read(&dev->power.child_count) == 0 ||
- dev->power.ignore_children);
+ /*
+ * Setting power.strict_midlayer means that the middle layer
+ * code does not want its runtime PM callbacks to be invoked via
+ * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
+ * return a direct pointer to the driver callback in that case.
+ */
+ if (dev_pm_strict_midlayer_is_set(dev))
+ return __rpm_get_driver_callback(dev, cb_offset);
+
+ return __rpm_get_callback(dev, cb_offset);
}
+#define GET_CALLBACK(dev, callback) \
+ get_callback(dev, offsetof(struct dev_pm_ops, callback))
+
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
@@ -1871,10 +2010,10 @@ int pm_runtime_force_suspend(struct device *dev)
int ret;
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
return 0;
- callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+ callback = GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq_check(dev, true);
ret = callback ? callback(dev) : 0;
@@ -1886,15 +2025,16 @@ int pm_runtime_force_suspend(struct device *dev)
/*
* If the device can stay in suspend after the system-wide transition
* to the working state that will follow, drop the children counter of
- * its parent, but set its status to RPM_SUSPENDED anyway in case this
- * function will be called again for it in the meantime.
+ * its parent and the usage counters of its suppliers. Otherwise, set
+ * power.needs_force_resume to let pm_runtime_force_resume() know that
+ * the device needs to be taken care of and to prevent this function
+ * from handling the device again in case the device is passed to it
+ * once more subsequently.
*/
- if (pm_runtime_need_not_resume(dev)) {
+ if (pm_runtime_need_not_resume(dev))
pm_runtime_set_suspended(dev);
- } else {
- __update_runtime_status(dev, RPM_SUSPENDED);
- dev->power.needs_force_resume = 1;
- }
+ else
+ dev->power.needs_force_resume = true;
return 0;
@@ -1905,33 +2045,37 @@ err:
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+#ifdef CONFIG_PM_SLEEP
+
/**
* pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
- * Prior invoking this function we expect the user to have brought the device
- * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and bring the device into full power, if it is expected to be
- * used on system resume. In the other case, we defer the resume to be managed
- * via runtime PM.
+ * This function expects that either pm_runtime_force_suspend() has put the
+ * device into a low-power state prior to calling it, or the device had been
+ * runtime-suspended before the preceding system-wide suspend transition and it
+ * was left in suspend during that transition.
+ *
+ * The actions carried out by pm_runtime_force_suspend(), or by a runtime
+ * suspend in general, are reversed and the device is brought back into full
+ * power if it is expected to be used on system resume, which is the case when
+ * its needs_force_resume flag is set or when its smart_suspend flag is set and
+ * its runtime PM status is "active".
*
- * Typically this function may be invoked from a system resume callback.
+ * In other cases, the resume is deferred to be managed via runtime PM.
+ *
+ * Typically, this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
int (*callback)(struct device *);
int ret = 0;
- if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+ if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
+ pm_runtime_status_suspended(dev)))
goto out;
- /*
- * The value of the parent's children counter is correct already, so
- * just update the status of the device.
- */
- __update_runtime_status(dev, RPM_ACTIVE);
-
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
+ callback = GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev, false);
ret = callback ? callback(dev) : 0;
@@ -1942,9 +2086,30 @@ int pm_runtime_force_resume(struct device *dev)
}
pm_runtime_mark_last_busy(dev);
+
out:
- dev->power.needs_force_resume = 0;
+ /*
+ * The smart_suspend flag can be cleared here because it is not going
+ * to be necessary until the next system-wide suspend transition that
+ * will update it again.
+ */
+ dev->power.smart_suspend = false;
+ /*
+ * Also clear needs_force_resume to make this function skip devices that
+ * have been seen by it once.
+ */
+ dev->power.needs_force_resume = false;
+
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
+
+bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
+#endif /* CONFIG_PM_SLEEP */
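
pm_runtime_force_suspend() and pm_runtime_force_resume() are most commonly wired up as a driver's system sleep callbacks while the runtime PM callbacks do the real work, roughly like this (foo_runtime_* are placeholders):

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

The DEFINE_RUNTIME_DEV_PM_OPS() helper expands to the same pairing for drivers that want the shorthand.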