Diffstat (limited to 'drivers/base/power/main.c')
-rw-r--r--	drivers/base/power/main.c	1427
1 file changed, 832 insertions(+), 595 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0992e67e862b..97a8b4fcf471 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/main.c - Where the driver meets power management.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
- * This file is released under the GPLv2
- *
- *
* The driver model core calls device_pm_add() when a device is registered.
* This will initialize the embedded device_pm_info object in the device
* and add it to the list of power-controlled devices. sysfs entries for
@@ -17,6 +15,9 @@
* subsystem list maintains.
*/
+#define pr_fmt(fmt) "PM: " fmt
+#define dev_fmt pr_fmt
+
#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
@@ -31,9 +32,9 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
-#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
+#include <linux/nmi.h>
#include "../base.h"
#include "power.h"
@@ -56,12 +57,26 @@ static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
-struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
+/**
+ * pm_hibernate_is_recovering - if recovering from hibernate due to error.
+ *
+ * Used to query if dev_pm_ops.thaw() is called for normal hibernation case or
+ * recovering from some error.
+ *
+ * Return: true for error case, false for normal case.
+ */
+bool pm_hibernate_is_recovering(void)
+{
+ return pm_transition.event == PM_EVENT_RECOVER;
+}
+EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
+
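[Editor's note, not part of the patch: a minimal sketch of how a driver's thaw callback might use the new helper to tell an aborted hibernation apart from the normal thaw that follows image creation. The foo_* names and foo_full_reinit() are hypothetical.]

static int foo_thaw(struct device *dev)
{
	if (pm_hibernate_is_recovering()) {
		/*
		 * Hibernation is being aborted due to an error, so restore
		 * full operation instead of the minimal post-image thaw.
		 */
		return foo_full_reinit(dev);	/* hypothetical helper */
	}

	/* Normal thaw after the hibernation image has been created. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.thaw = foo_thaw,
};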
static const char *pm_verb(int event)
{
switch (event) {
@@ -81,6 +96,8 @@ static const char *pm_verb(int event)
return "restore";
case PM_EVENT_RECOVER:
return "recover";
+ case PM_EVENT_POWEROFF:
+ return "poweroff";
default:
return "(unknown PM event)";
}
@@ -124,7 +141,11 @@ void device_pm_unlock(void)
*/
void device_pm_add(struct device *dev)
{
- pr_debug("PM: Adding info for %s:%s\n",
+ /* Skip PM setup/initialization. */
+ if (device_pm_not_required(dev))
+ return;
+
+ pr_debug("Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
device_pm_check_callbacks(dev);
mutex_lock(&dpm_list_mtx);
@@ -142,7 +163,10 @@ void device_pm_add(struct device *dev)
*/
void device_pm_remove(struct device *dev)
{
- pr_debug("PM: Removing info for %s:%s\n",
+ if (device_pm_not_required(dev))
+ return;
+
+ pr_debug("Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
@@ -161,7 +185,7 @@ void device_pm_remove(struct device *dev)
*/
void device_pm_move_before(struct device *deva, struct device *devb)
{
- pr_debug("PM: Moving %s:%s before %s:%s\n",
+ pr_debug("Moving %s:%s before %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert before devb. */
@@ -175,7 +199,7 @@ void device_pm_move_before(struct device *deva, struct device *devb)
*/
void device_pm_move_after(struct device *deva, struct device *devb)
{
- pr_debug("PM: Moving %s:%s after %s:%s\n",
+ pr_debug("Moving %s:%s after %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert after devb. */
@@ -188,7 +212,7 @@ void device_pm_move_after(struct device *deva, struct device *devb)
*/
void device_pm_move_last(struct device *dev)
{
- pr_debug("PM: Moving %s:%s to end of list\n",
+ pr_debug("Moving %s:%s to end of list\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
list_move_tail(&dev->power.entry, &dpm_list);
}
@@ -198,7 +222,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb)
if (!pm_print_times_enabled)
return 0;
- dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
+ dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
@@ -208,16 +232,13 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
void *cb, int error)
{
ktime_t rettime;
- s64 nsecs;
if (!pm_print_times_enabled)
return;
rettime = ktime_get();
- nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
-
- dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
- (unsigned long long)nsecs >> 10);
+ dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
+ (unsigned long long)ktime_us_delta(rettime, calltime));
}
/**
@@ -242,7 +263,7 @@ static int dpm_wait_fn(struct device *dev, void *async_ptr)
static void dpm_wait_for_children(struct device *dev, bool async)
{
- device_for_each_child(dev, &async, dpm_wait_fn);
+ device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
@@ -259,17 +280,46 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
}
-static void dpm_wait_for_superior(struct device *dev, bool async)
+static bool dpm_wait_for_superior(struct device *dev, bool async)
{
- dpm_wait(dev->parent, async);
+ struct device *parent;
+
+ /*
+ * If the device is resumed asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by reference
+ * counting the parent once more unless the device has been deleted
+ * already (in which case return right away).
+ */
+ mutex_lock(&dpm_list_mtx);
+
+ if (!device_pm_initialized(dev)) {
+ mutex_unlock(&dpm_list_mtx);
+ return false;
+ }
+
+ parent = get_device(dev->parent);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ dpm_wait(parent, async);
+ put_device(parent);
+
dpm_wait_for_suppliers(dev, async);
+
+ /*
+ * If the parent's callback has deleted the device, attempting to resume
+ * it would be invalid, so avoid doing that then.
+ */
+ return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
@@ -288,8 +338,9 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
@@ -319,12 +370,12 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
return ops->thaw;
- break;
case PM_EVENT_RESTORE:
return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
@@ -354,6 +405,7 @@ static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_late;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_late;
case PM_EVENT_THAW:
@@ -388,6 +440,7 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_noirq;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_noirq;
case PM_EVENT_THAW:
@@ -403,16 +456,16 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
- dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
+ dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
- ", may wakeup" : "");
+ ", may wakeup" : "", dev->power.driver_flags);
}
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
- printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
- dev_name(dev), pm_verb(state.event), info, error);
+ dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
+ error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -450,7 +503,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -462,14 +515,20 @@ struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
+ bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
struct dpm_watchdog wd
+static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
+module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
+MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
+ "Backtrace all CPUs on DPM watchdog timeout");
+
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that PM watchdog depends on.
*
* Called when a driver has timed out suspending or resuming.
* There's not much we can do here to recover so panic() to
@@ -477,12 +536,28 @@ struct dpm_watchdog {
*/
static void dpm_watchdog_handler(struct timer_list *t)
{
- struct dpm_watchdog *wd = from_timer(wd, t, timer);
+ struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
+ struct timer_list *timer = &wd->timer;
+ unsigned int time_left;
+
+ if (wd->fatal) {
+ unsigned int this_cpu = smp_processor_id();
+
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ if (dpm_watchdog_all_cpu_backtrace)
+ trigger_allbutcpu_cpu_backtrace(this_cpu);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+ }
- dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL);
- panic("%s %s: unrecoverable failure\n",
- dev_driver_string(wd->dev), dev_name(wd->dev));
+ time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
+ dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
+ CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
+ show_stack(wd->tsk, NULL, KERN_WARNING);
+
+ wd->fatal = true;
+ mod_timer(timer, jiffies + HZ * time_left);
}
/**
@@ -496,10 +571,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
+ wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
- timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
@@ -511,8 +587,8 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
struct timer_list *timer = &wd->timer;
- del_timer_sync(timer);
- destroy_timer_on_stack(timer);
+ timer_delete_sync(timer);
+ timer_destroy_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
@@ -523,86 +599,121 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
- * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
+ * dev_pm_skip_resume - System-wide device resume optimization check.
* @dev: Target device.
*
- * Make the core skip the "early resume" and "resume" phases for @dev.
- *
- * This function can be called by middle-layer code during the "noirq" phase of
- * system resume if necessary, but not by device drivers.
+ * Return:
+ * - %false if the transition under way is RESTORE.
+ * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
+ * - The logical negation of %power.must_resume otherwise (that is, when the
+ * transition under way is RESUME).
*/
-void dev_pm_skip_next_resume_phases(struct device *dev)
+bool dev_pm_skip_resume(struct device *dev)
{
- dev->power.is_late_suspended = false;
- dev->power.is_suspended = false;
+ if (pm_transition.event == PM_EVENT_RESTORE)
+ return false;
+
+ if (pm_transition.event == PM_EVENT_THAW)
+ return dev_pm_skip_suspend(dev);
+
+ return !dev->power.must_resume;
}
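[Editor's note, not part of the patch: a rough sketch of how middle-layer code would be expected to consult dev_pm_skip_resume() in its "noirq" or "early" resume callback; the foo_bus name is hypothetical.]

static int foo_bus_resume_noirq(struct device *dev)
{
	/* Honour the core's decision to leave the device in suspend. */
	if (dev_pm_skip_resume(dev))
		return 0;

	return pm_generic_resume_noirq(dev);
}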
-/**
- * suspend_event - Return a "suspend" message for given "resume" one.
- * @resume_msg: PM message representing a system-wide resume transition.
- */
-static pm_message_t suspend_event(pm_message_t resume_msg)
+static bool is_async(struct device *dev)
{
- switch (resume_msg.event) {
- case PM_EVENT_RESUME:
- return PMSG_SUSPEND;
- case PM_EVENT_THAW:
- case PM_EVENT_RESTORE:
- return PMSG_FREEZE;
- case PM_EVENT_RECOVER:
- return PMSG_HIBERNATE;
- }
- return PMSG_ON;
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
}
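[Editor's note, not part of the patch: is_async() only returns true for devices that have opted into asynchronous suspend/resume, typically once at probe time as sketched below (hypothetical foo_probe); the global switch is /sys/power/pm_async.]

static int foo_probe(struct platform_device *pdev)
{
	/* Let the PM core handle this device in an async thread. */
	device_enable_async_suspend(&pdev->dev);

	return 0;
}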
-/**
- * dev_pm_may_skip_resume - System-wide device resume optimization check.
- * @dev: Target device.
- *
- * Checks whether or not the device may be left in suspend after a system-wide
- * transition to the working state.
- */
-bool dev_pm_may_skip_resume(struct device *dev)
+static bool __dpm_async(struct device *dev, async_func_t func)
{
- return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+ if (dev->power.work_in_progress)
+ return true;
+
+ if (!is_async(dev))
+ return false;
+
+ dev->power.work_in_progress = true;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
+
+ put_device(dev);
+
+ return false;
}
-static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
+static bool dpm_async_fn(struct device *dev, async_func_t func)
{
- pm_callback_t callback;
- const char *info;
+ guard(mutex)(&async_wip_mtx);
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
+ return __dpm_async(dev, func);
+}
+
+static int dpm_async_with_cleanup(struct device *dev, void *fn)
+{
+ guard(mutex)(&async_wip_mtx);
- if (info_p)
- *info_p = info;
+ if (!__dpm_async(dev, fn))
+ dev->power.work_in_progress = false;
- return callback;
+ return 0;
}
-static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p);
+static void dpm_async_resume_children(struct device *dev, async_func_t func)
+{
+ /*
+ * Prevent racing with dpm_clear_async_state() during initial list
+ * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
+ * dpm_resume().
+ */
+ guard(mutex)(&dpm_list_mtx);
-static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
- pm_message_t state,
- const char **info_p);
+ /*
+ * Start processing "async" children of the device unless it's been
+ * started already for them.
+ */
+ device_for_each_child(dev, func, dpm_async_with_cleanup);
+}
+
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ dpm_async_resume_children(dev, func);
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" consumers. */
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->consumer, func);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_clear_async_state(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+ dev->power.work_in_progress = false;
+}
+
+static bool dpm_root_device(struct device *dev)
+{
+ lockdep_assert_held(&dpm_list_mtx);
+
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * consumers is actually empty before calling it.
+ */
+ return !dev->parent && list_empty(&dev->links.suppliers);
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie);
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
@@ -613,10 +724,10 @@ static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
bool skip_resume;
int error = 0;
@@ -626,42 +737,59 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend has been aborted in the noirq
+ * phase before invoking the noirq suspend callback for the
+ * device, so if device_suspend_late() has left it in suspend,
+ * device_resume_early() should leave it in suspend either in
+ * case the early resume of it depends on the noirq resume that
+ * has not run.
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
- skip_resume = dev_pm_may_skip_resume(dev);
+ skip_resume = dev_pm_skip_resume(dev);
+ /*
+ * If the driver callback is skipped below or by the middle layer
+ * callback and device_resume_early() also skips the driver callback for
+ * this device later, it needs to appear as "suspended" to PM-runtime,
+ * so change its status accordingly.
+ *
+ * Otherwise, the device is going to be resumed, so set its PM-runtime
+ * status to "active" unless its power.smart_suspend flag is clear, in
+ * which case it is not necessary to update its PM-runtime status.
+ */
+ if (skip_resume)
+ pm_runtime_set_suspended(dev);
+ else if (dev_pm_smart_suspend(dev))
+ pm_runtime_set_active(dev);
- callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
if (skip_resume)
goto Skip;
- if (dev_pm_smart_suspend_and_suspended(dev)) {
- pm_message_t suspend_msg = suspend_event(state);
-
- /*
- * If "freeze" callbacks have been skipped during a transition
- * related to hibernation, the subsequent "thaw" callbacks must
- * be skipped too or bad things may happen. Otherwise, resume
- * callbacks are going to be run for the device, so its runtime
- * PM status must be changed to reflect the new state after the
- * transition under way.
- */
- if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
- !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
- if (state.event == PM_EVENT_THAW) {
- skip_resume = true;
- goto Skip;
- } else {
- pm_runtime_set_active(dev);
- }
- }
- }
-
if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
@@ -673,96 +801,72 @@ Run:
Skip:
dev->power.is_noirq_suspended = false;
- if (skip_resume) {
- /*
- * The device is going to be left in suspend, but it might not
- * have been in runtime suspend before the system suspended, so
- * its runtime PM status needs to be updated to avoid confusing
- * the runtime PM framework when runtime PM is enabled for the
- * device again.
- */
- pm_runtime_set_suspended(dev);
- dev_pm_skip_next_resume_phases(dev);
- }
-
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
- return error;
-}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
+ if (error) {
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
+ }
+
+ dpm_async_resume_subordinate(dev, async_resume_noirq);
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
- int error;
-
- error = device_resume_noirq(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ struct device *dev = data;
+ device_resume_noirq(dev, pm_transition, true);
put_device(dev);
}
-void dpm_noirq_resume_devices(pm_message_t state)
+static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
- * Advanced the async threads upfront,
- * in case the starting of async threads is
- * delayed by non-async resuming devices.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_resume_noirq, dev);
- }
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_noirq);
}
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
- get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- mutex_unlock(&dpm_list_mtx);
- if (!is_async(dev)) {
- int error;
+ if (!dpm_async_fn(dev, async_resume_noirq)) {
+ get_device(dev);
- error = device_resume_noirq(dev, state, false);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
- }
- }
+ mutex_unlock(&dpm_list_mtx);
- mutex_lock(&dpm_list_mtx);
- put_device(dev);
+ device_resume_noirq(dev, state, false);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ }
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
- trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
-}
+ if (READ_ONCE(async_error))
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-void dpm_noirq_end(void)
-{
- resume_device_irqs();
- device_wakeup_disarm_wake_irqs();
- cpuidle_resume();
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
@@ -775,38 +879,13 @@ void dpm_noirq_end(void)
void dpm_resume_noirq(pm_message_t state)
{
dpm_noirq_resume_devices(state);
- dpm_noirq_end();
-}
-static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "early power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "early type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "early class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "early bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
+ resume_device_irqs();
+ device_wakeup_disarm_wake_irqs();
}
+static void async_resume_early(void *data, async_cookie_t cookie);
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -815,50 +894,77 @@ static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Out;
if (!dev->power.is_late_suspended)
goto Out;
- dpm_wait_for_superior(dev, async);
+ if (dev->power.syscore)
+ goto Skip;
- callback = dpm_subsys_resume_early_cb(dev, state, &info);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (dev_pm_skip_resume(dev))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "early driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
dev->power.is_late_suspended = false;
+ pm_runtime_enable(dev);
- Out:
+Out:
TRACE_RESUME(error);
- pm_runtime_enable(dev);
complete_all(&dev->power.completion);
- return error;
+
+ if (error) {
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async early" : " early", error);
+ }
+
+ dpm_async_resume_subordinate(dev, async_resume_early);
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
- int error;
-
- error = device_resume_early(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ struct device *dev = data;
+ device_resume_early(dev, pm_transition, true);
put_device(dev);
}
@@ -872,45 +978,44 @@ void dpm_resume_early(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
- * Advanced the async threads upfront,
- * in case the starting of async threads is
- * delayed by non-async resuming devices.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_resume_early, dev);
- }
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_early);
}
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
- get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- mutex_unlock(&dpm_list_mtx);
- if (!is_async(dev)) {
- int error;
+ if (!dpm_async_fn(dev, async_resume_early)) {
+ get_device(dev);
- error = device_resume_early(dev, state, false);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
+ mutex_unlock(&dpm_list_mtx);
+
+ device_resume_early(dev, state, false);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
}
- mutex_lock(&dpm_list_mtx);
- put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
+ if (READ_ONCE(async_error))
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@@ -925,13 +1030,15 @@ void dpm_resume_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
+static void async_resume(void *data, async_cookie_t cookie);
+
/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*/
-static int device_resume(struct device *dev, pm_message_t state, bool async)
+static void device_resume(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -944,13 +1051,27 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ if (!dev->power.is_suspended)
+ goto Complete;
+
+ dev->power.is_suspended = false;
+
if (dev->power.direct_complete) {
- /* Match the pm_runtime_disable() in __device_suspend(). */
+ /*
+ * Allow new children to be added under the device after this
+ * point if it has no PM callbacks.
+ */
+ if (dev->power.no_pm_callbacks)
+ dev->power.is_prepared = false;
+
+ /* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Complete;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -960,9 +1081,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
*/
dev->power.is_prepared = false;
- if (!dev->power.is_suspended)
- goto Unlock;
-
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@@ -1000,9 +1118,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
End:
error = dpm_run_callback(callback, dev, state, info);
- dev->power.is_suspended = false;
- Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);
@@ -1011,17 +1127,20 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
- return error;
+ if (error) {
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
+
+ dpm_async_resume_subordinate(dev, async_resume);
}
static void async_resume(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
- int error;
+ struct device *dev = data;
- error = device_resume(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ device_resume(dev, pm_transition, true);
put_device(dev);
}
@@ -1038,45 +1157,43 @@ void dpm_resume(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
- might_sleep();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
+ /*
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
+ */
list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_resume, dev);
- }
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume);
}
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
- get_device(dev);
- if (!is_async(dev)) {
- int error;
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
+ if (!dpm_async_fn(dev, async_resume)) {
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_resume(dev, state, false);
- if (error) {
- suspend_stats.failed_resume++;
- dpm_save_failed_step(SUSPEND_RESUME);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, "", error);
- }
+ device_resume(dev, state, false);
+
+ put_device(dev);
mutex_lock(&dpm_list_mtx);
}
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &dpm_prepared_list);
- put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
+ if (READ_ONCE(async_error))
+ dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
devfreq_resume();
@@ -1094,7 +1211,7 @@ static void device_complete(struct device *dev, pm_message_t state)
const char *info = NULL;
if (dev->power.syscore)
- return;
+ goto out;
device_lock(dev);
@@ -1124,6 +1241,9 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
+out:
+ /* If enabling runtime PM for the device is blocked, unblock it. */
+ pm_runtime_unblock(dev);
pm_runtime_put(dev);
}
@@ -1139,7 +1259,6 @@ void dpm_complete(pm_message_t state)
struct list_head list;
trace_suspend_resume(TPS("dpm_complete"), state.event, true);
- might_sleep();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
@@ -1149,14 +1268,16 @@ void dpm_complete(pm_message_t state)
get_device(dev);
dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
+
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
device_complete(dev, state);
trace_device_pm_callback_end(dev, 0);
- mutex_lock(&dpm_list_mtx);
put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
@@ -1176,6 +1297,7 @@ void dpm_complete(pm_message_t state)
void dpm_resume_end(pm_message_t state)
{
dpm_resume(state);
+ pm_restore_gfp_mask();
dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
@@ -1183,6 +1305,82 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
+static bool dpm_leaf_device(struct device *dev)
+{
+ struct device *child;
+
+ lockdep_assert_held(&dpm_list_mtx);
+
+ child = device_find_any_child(dev);
+ if (child) {
+ put_device(child);
+
+ return false;
+ }
+
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * consumers is actually empty before calling it.
+ */
+ return list_empty(&dev->links.consumers);
+}
+
+static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
+{
+ guard(mutex)(&dpm_list_mtx);
+
+ /*
+ * If the device is suspended asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by checking
+ * if the device has been deleted already as the parent cannot be
+ * deleted before it.
+ */
+ if (!device_pm_initialized(dev))
+ return false;
+
+ /* Start processing the device's parent if it is "async". */
+ if (dev->parent)
+ dpm_async_with_cleanup(dev->parent, func);
+
+ return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ if (!dpm_async_suspend_parent(dev, func))
+ return;
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" suppliers. */
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->supplier, func);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_async_suspend_complete_all(struct list_head *device_list)
+{
+ struct device *dev;
+
+ guard(mutex)(&async_wip_mtx);
+
+ list_for_each_entry_reverse(dev, device_list, power.entry) {
+ /*
+ * In case the device is being waited for and async processing
+ * has not started for it yet, let the waiters make progress.
+ */
+ if (!dev->power.work_in_progress)
+ complete_all(&dev->power.completion);
+ }
+}
+
/**
* resume_event - Return a "resume" message for given "suspend" sleep state.
* @sleep_state: PM message representing a sleep state.
@@ -1214,69 +1412,16 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
}
-static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
-}
-
-static bool device_must_resume(struct device *dev, pm_message_t state,
- bool no_subsys_suspend_noirq)
-{
- pm_message_t resume_msg = resume_event(state);
-
- /*
- * If all of the device driver's "noirq", "late" and "early" callbacks
- * are invoked directly by the core, the decision to allow the device to
- * stay in suspend can be based on its current runtime PM status and its
- * wakeup settings.
- */
- if (no_subsys_suspend_noirq &&
- !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
- !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
- !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
- return !pm_runtime_status_suspended(dev) &&
- (resume_msg.event != PM_EVENT_RESUME ||
- (device_can_wakeup(dev) && !device_may_wakeup(dev)));
-
- /*
- * The only safe strategy here is to require that if the device may not
- * be left in suspend, resume callbacks must be invoked for it.
- */
- return !dev->power.may_skip_resume;
-}
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
/**
- * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
+ * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1284,11 +1429,10 @@ static bool device_must_resume(struct device *dev, pm_message_t state,
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
- bool no_subsys_cb = false;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
@@ -1296,24 +1440,29 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
- if (pm_wakeup_pending()) {
- async_error = -EBUSY;
- goto Complete;
- }
-
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
- no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
-
- if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+ if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
@@ -1324,20 +1473,24 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
}
Skip:
dev->power.is_noirq_suspended = true;
- if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
- dev->power.must_resume = dev->power.must_resume ||
- atomic_read(&dev->power.usage_count) > 1 ||
- device_must_resume(dev, state, no_subsys_cb);
- } else {
+ /*
+ * Devices must be resumed unless they are explicitly allowed to be left
+ * in suspend, but even in that case skipping the resume of devices that
+ * were in use right before the system suspend (as indicated by their
+ * runtime PM usage counters and child counters) would be suboptimal.
+ */
+ if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
- }
if (dev->power.must_resume)
dpm_superior_set_must_resume(dev);
@@ -1345,83 +1498,81 @@ Skip:
Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
-}
-
-static void async_suspend_noirq(void *data, async_cookie_t cookie)
-{
- struct device *dev = (struct device *)data;
- int error;
- error = __device_suspend_noirq(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ if (error || READ_ONCE(async_error))
+ return;
- put_device(dev);
+ dpm_async_suspend_superior(dev, async_suspend_noirq);
}
-static int device_suspend_noirq(struct device *dev)
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
- reinit_completion(&dev->power.completion);
+ struct device *dev = data;
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_suspend_noirq, dev);
- return 0;
- }
- return __device_suspend_noirq(dev, pm_transition, false);
-}
-
-void dpm_noirq_begin(void)
-{
- cpuidle_pause();
- device_wakeup_arm_wake_irqs();
- suspend_device_irqs();
+ device_suspend_noirq(dev, pm_transition, true);
+ put_device(dev);
}
-int dpm_noirq_suspend_devices(pm_message_t state)
+static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_noirq);
+ }
+
while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.prev);
+ dev = to_device(dpm_late_early_list.prev);
+
+ list_move(&dev->power.entry, &dpm_noirq_list);
+
+ if (dpm_async_fn(dev, async_suspend_noirq))
+ continue;
get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev);
+ device_suspend_noirq(dev, state, false);
- mutex_lock(&dpm_list_mtx);
- if (error) {
- pm_dev_err(dev, state, " noirq", error);
- dpm_save_failed_dev(dev_name(dev));
- put_device(dev);
- break;
- }
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
- if (async_error)
+ mutex_lock(&dpm_list_mtx);
+
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_late_early_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
break;
+ }
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
- if (!error)
- error = async_error;
- if (error) {
- suspend_stats.failed_suspend_noirq++;
+ error = READ_ONCE(async_error);
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
- }
+
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
@@ -1438,7 +1589,9 @@ int dpm_suspend_noirq(pm_message_t state)
{
int ret;
- dpm_noirq_begin();
+ device_wakeup_arm_wake_irqs();
+ suspend_device_irqs();
+
ret = dpm_noirq_suspend_devices(state);
if (ret)
dpm_resume_noirq(resume_event(state));
@@ -1455,79 +1608,70 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
spin_lock_irq(&parent->power.lock);
- if (dev->power.wakeup_path && !parent->power.ignore_children)
+ if (device_wakeup_path(dev) && !parent->power.ignore_children)
parent->power.wakeup_path = true;
spin_unlock_irq(&parent->power.lock);
}
-static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "late type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "late class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "late bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
-}
+static void async_suspend_late(void *data, async_cookie_t cookie);
/**
- * __device_suspend_late - Execute a "late suspend" callback for given device.
+ * device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- __pm_runtime_disable(dev, false);
-
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
if (pm_wakeup_pending()) {
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Complete;
- callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+ /*
+ * Disable runtime PM for the device without checking if there is a
+ * pending resume request for it.
+ */
+ __pm_runtime_disable(dev, false);
+
+ if (dev->power.syscore)
+ goto Skip;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
- if (dev_pm_smart_suspend_and_suspended(dev) &&
- !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+ if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
@@ -1538,7 +1682,10 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async late" : " late", error);
+ pm_runtime_enable(dev);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@@ -1549,33 +1696,19 @@ Skip:
Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
- return error;
-}
-static void async_suspend_late(void *data, async_cookie_t cookie)
-{
- struct device *dev = (struct device *)data;
- int error;
+ if (error || READ_ONCE(async_error))
+ return;
- error = __device_suspend_late(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
- put_device(dev);
+ dpm_async_suspend_superior(dev, async_suspend_late);
}
-static int device_suspend_late(struct device *dev)
+static void async_suspend_late(void *data, async_cookie_t cookie)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_suspend_late, dev);
- return 0;
- }
+ struct device *dev = data;
- return __device_suspend_late(dev, pm_transition, false);
+ device_suspend_late(dev, pm_transition, true);
+ put_device(dev);
}
/**
@@ -1585,42 +1718,63 @@ static int device_suspend_late(struct device *dev)
int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
+ wake_up_all_idle_cpus();
+
+ mutex_lock(&dpm_list_mtx);
+
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_late);
+ }
+
while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
+ dev = to_device(dpm_suspended_list.prev);
+
+ list_move(&dev->power.entry, &dpm_late_early_list);
+
+ if (dpm_async_fn(dev, async_suspend_late))
+ continue;
get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev);
+ device_suspend_late(dev, state, false);
+
+ put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_late_early_list);
- if (error) {
- pm_dev_err(dev, state, " late", error);
- dpm_save_failed_dev(dev_name(dev));
- put_device(dev);
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_suspended_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
break;
}
- put_device(dev);
-
- if (async_error)
- break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
- if (!error)
- error = async_error;
+
+ error = READ_ONCE(async_error);
if (error) {
- suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
}
@@ -1635,17 +1789,20 @@ int dpm_suspend_late(pm_message_t state)
*/
int dpm_suspend_end(pm_message_t state)
{
- int error = dpm_suspend_late(state);
+ ktime_t starttime = ktime_get();
+ int error;
+
+ error = dpm_suspend_late(state);
if (error)
- return error;
+ goto out;
error = dpm_suspend_noirq(state);
- if (error) {
+ if (error)
dpm_resume_early(resume_event(state));
- return error;
- }
- return 0;
+out:
+ dpm_show_time(starttime, state, error, "end");
+ return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
@@ -1668,7 +1825,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev, state);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -1688,7 +1845,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -1697,13 +1854,15 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
device_links_read_unlock(idx);
}
+static void async_suspend(void *data, async_cookie_t cookie);
+
/**
- * __device_suspend - Execute "suspend" callbacks for given device.
+ * device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
-static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+static void device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1715,42 +1874,53 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_subordinate(dev, async);
- if (async_error) {
+ if (READ_ONCE(async_error)) {
dev->power.direct_complete = false;
goto Complete;
}
/*
- * If a device configured to wake up the system from sleep states
- * has been suspended at run time and there's a resume request pending
- * for it, this is equivalent to the device signaling wakeup, so the
- * system suspend operation should be aborted.
+ * Wait for possible runtime PM transitions of the device in progress
+ * to complete and if there's a runtime resume request pending for it,
+ * resume it before proceeding with invoking the system-wide suspend
+ * callbacks for it.
+ *
+ * If the system-wide suspend callbacks below change the configuration
+ * of the device, they must disable runtime PM for it or otherwise
+ * ensure that its runtime-resume callbacks will not be confused by that
+ * change in case they are invoked going forward.
*/
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
+ pm_runtime_barrier(dev);
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
if (dev->power.syscore)
goto Complete;
+ /* Avoid direct_complete to let wakeup_path propagate. */
+ if (device_may_wakeup(dev) || device_wakeup_path(dev))
+ dev->power.direct_complete = false;
+
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev)) {
+ pm_dev_dbg(dev, state, "direct-complete ");
+ dev->power.is_suspended = true;
goto Complete;
+ }
pm_runtime_enable(dev);
}
dev->power.direct_complete = false;
}
- dev->power.may_skip_resume = false;
- dev->power.must_resume = false;
+ dev->power.may_skip_resume = true;
+ dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1807,39 +1977,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_watchdog_clear(&wd);
Complete:
- if (error)
- async_error = error;
+ if (error) {
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
-}
-
-static void async_suspend(void *data, async_cookie_t cookie)
-{
- struct device *dev = (struct device *)data;
- int error;
- error = __device_suspend(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ if (error || READ_ONCE(async_error))
+ return;
- put_device(dev);
+ dpm_async_suspend_superior(dev, async_suspend);
}
-static int device_suspend(struct device *dev)
+static void async_suspend(void *data, async_cookie_t cookie)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_suspend, dev);
- return 0;
- }
+ struct device *dev = data;
- return __device_suspend(dev, pm_transition, false);
+ device_suspend(dev, pm_transition, true);
+ put_device(dev);
}
/**
@@ -1849,7 +2007,8 @@ static int device_suspend(struct device *dev)
int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
@@ -1857,43 +2016,103 @@ int dpm_suspend(pm_message_t state)
devfreq_suspend();
cpufreq_suspend();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
+
+ mutex_lock(&dpm_list_mtx);
+
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend);
+ }
+
while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
+ dev = to_device(dpm_prepared_list.prev);
+
+ list_move(&dev->power.entry, &dpm_suspended_list);
+
+ if (dpm_async_fn(dev, async_suspend))
+ continue;
get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
- error = device_suspend(dev);
+ device_suspend(dev, state, false);
+
+ put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error) {
- pm_dev_err(dev, state, "", error);
- dpm_save_failed_dev(dev_name(dev));
- put_device(dev);
+
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_prepared_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
break;
}
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_suspended_list);
- put_device(dev);
- if (async_error)
- break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
- if (!error)
- error = async_error;
- if (error) {
- suspend_stats.failed_suspend++;
+
+ error = READ_ONCE(async_error);
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
- }
+
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
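The loop above schedules async-capable leaf devices first and funnels every failure through async_error, now accessed with READ_ONCE()/WRITE_ONCE(). A simplified sketch of that error-recording pattern, assuming a hypothetical do_one_suspend() helper (the real workers are async_suspend() and device_suspend() above):

#include <linux/async.h>
#include <linux/device.h>

int do_one_suspend(struct device *dev);         /* hypothetical helper */

static int sketch_async_error;

static void sketch_async_worker(void *data, async_cookie_t cookie)
{
        struct device *dev = data;
        int error = do_one_suspend(dev);

        /* Record the failure; WRITE_ONCE() prevents torn or elided stores. */
        if (error)
                WRITE_ONCE(sketch_async_error, error);

        put_device(dev);
}

static int sketch_wait_for_workers(void)
{
        /* All workers have finished by the time the error is read back. */
        async_synchronize_full();
        return READ_ONCE(sketch_async_error);
}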
+static bool device_prepare_smart_suspend(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = true;
+ int idx;
+
+ /*
+ * The "smart suspend" feature is enabled for devices whose drivers ask
+ * for it and for devices without PM callbacks.
+ *
+ * However, if "smart suspend" is not enabled for the device's parent
+ * or any of its suppliers that take runtime PM into account, it cannot
+ * be enabled for the device either.
+ */
+ if (!dev->power.no_pm_callbacks &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ return false;
+
+ if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
+ !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
+ return false;
+
+ idx = device_links_read_lock();
+
+ dev_for_each_link_to_supplier(link, dev) {
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (!dev_pm_smart_suspend(link->supplier) &&
+ !pm_runtime_blocked(link->supplier)) {
+ ret = false;
+ break;
+ }
+ }
+
+ device_links_read_unlock(idx);
+
+ return ret;
+}
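The supplier walk in device_prepare_smart_suspend() only looks at device links created with DL_FLAG_PM_RUNTIME. A short sketch of how such a link typically comes into existence on the consumer side (the foo_ name and the consumer/supplier pointers are placeholders, not taken from this patch):

#include <linux/device.h>

static int foo_link_supplier(struct device *consumer, struct device *supplier)
{
        struct device_link *link;

        /*
         * Only DL_FLAG_PM_RUNTIME links are inspected above: with one in
         * place, the consumer cannot use "smart suspend" unless the supplier
         * also uses it or has runtime PM blocked.
         */
        link = device_link_add(consumer, supplier,
                               DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);

        return link ? 0 : -ENODEV;
}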
+
/**
* device_prepare - Prepare a device for system power transition.
* @dev: Device to handle.
@@ -1905,15 +2124,9 @@ int dpm_suspend(pm_message_t state)
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
+ bool smart_suspend;
int ret = 0;
- if (dev->power.syscore)
- return 0;
-
- WARN_ON(!pm_runtime_enabled(dev) &&
- dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED));
-
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
@@ -1921,10 +2134,21 @@ static int device_prepare(struct device *dev, pm_message_t state)
* it again during the complete phase.
*/
pm_runtime_get_noresume(dev);
+ /*
+ * If runtime PM is disabled for the device at this point and it has
+ * never been enabled so far, it should not be enabled until this system
+ * suspend-resume cycle is complete, so prepare to trigger a warning on
+ * subsequent attempts to enable it.
+ */
+ smart_suspend = !pm_runtime_block_if_disabled(dev);
+
+ if (dev->power.syscore)
+ return 0;
device_lock(dev);
dev->power.wakeup_path = false;
+ dev->power.out_band_wakeup = false;
if (dev->power.no_pm_callbacks)
goto unlock;
@@ -1948,10 +2172,17 @@ unlock:
device_unlock(dev);
if (ret < 0) {
- suspend_report_result(callback, ret);
+ suspend_report_result(dev, callback, ret);
pm_runtime_put(dev);
return ret;
}
+ /* Do not enable "smart suspend" for devices with disabled runtime PM. */
+ if (smart_suspend)
+ smart_suspend = device_prepare_smart_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.smart_suspend = smart_suspend;
/*
* A positive return value from ->prepare() means "this device appears
* to be runtime-suspended and its state is fine, so if it really is
@@ -1959,12 +2190,12 @@ unlock:
* will do the same thing with all of its descendants". This only
* applies to suspend transitions, however.
*/
- spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
- ((pm_runtime_suspended(dev) && ret > 0) ||
- dev->power.no_pm_callbacks) &&
- !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
+ (ret > 0 || dev->power.no_pm_callbacks) &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+
spin_unlock_irq(&dev->power.lock);
+
return 0;
}
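With the condition rewritten above, direct_complete for PM_EVENT_SUSPEND now hinges on a positive ->prepare() return (or the absence of PM callbacks) and on the driver not setting DPM_FLAG_NO_DIRECT_COMPLETE. A sketch of a ->prepare() implementation that takes advantage of this, assuming a hypothetical foo driver:

#include <linux/pm_runtime.h>

static int foo_prepare(struct device *dev)
{
        /*
         * Hypothetical callback: nothing needs to be reconfigured before
         * suspend, so report a positive value when the device is already
         * runtime-suspended and let the core leave it in that state
         * ("direct complete") for the whole transition.
         */
        return pm_runtime_suspended(dev);
}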
@@ -1979,7 +2210,6 @@ int dpm_prepare(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
- might_sleep();
/*
* Give a chance for the known devices to complete their probes, before
@@ -1996,10 +2226,11 @@ int dpm_prepare(pm_message_t state)
device_block_probing();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_list)) {
+ while (!list_empty(&dpm_list) && !error) {
struct device *dev = to_device(dpm_list.next);
get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
@@ -2007,22 +2238,23 @@ int dpm_prepare(pm_message_t state)
trace_device_pm_callback_end(dev, error);
mutex_lock(&dpm_list_mtx);
- if (error) {
- if (error == -EAGAIN) {
- put_device(dev);
- error = 0;
- continue;
- }
- printk(KERN_INFO "PM: Device %s not prepared "
- "for power transition: code %d\n",
- dev_name(dev), error);
- put_device(dev);
- break;
+
+ if (!error) {
+ dev->power.is_prepared = true;
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+ } else if (error == -EAGAIN) {
+ error = 0;
+ } else {
+ dev_info(dev, "not prepared for power transition: code %d\n",
+ error);
}
- dev->power.is_prepared = true;
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
+ mutex_unlock(&dpm_list_mtx);
+
put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
@@ -2038,29 +2270,33 @@ int dpm_prepare(pm_message_t state)
*/
int dpm_suspend_start(pm_message_t state)
{
+ ktime_t starttime = ktime_get();
int error;
error = dpm_prepare(state);
- if (error) {
- suspend_stats.failed_prepare++;
+ if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
- } else
+ else {
+ pm_restrict_gfp_mask();
error = dpm_suspend(state);
+ }
+
+ dpm_show_time(starttime, state, error, "start");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
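Caller-side view of dpm_suspend_start(), simplified from what the system suspend core does (the example_enter_suspend() name is illustrative): even when the prepare or suspend phase fails, a full dpm_resume_end() pass is still required so that devices already prepared or suspended are resumed and completed.

#include <linux/pm.h>
#include <linux/printk.h>

static int example_enter_suspend(void)
{
        int error;

        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
                pr_err("Some devices failed to suspend\n");
                goto resume;
        }

        /*
         * ... dpm_suspend_end(), enter the platform sleep state,
         * dpm_resume_start() ...
         */

resume:
        dpm_resume_end(PMSG_RESUME);
        return error;
}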
-void __suspend_report_result(const char *function, void *fn, int ret)
+void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
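Driver-side use of the updated helper normally goes through the suspend_report_result() wrapper, which (as the call in device_prepare() above shows) now also takes the device, so the diagnostic is emitted with dev_err() and carries the device name. A sketch with hypothetical foo_ helpers:

#include <linux/device.h>
#include <linux/pm.h>

int foo_do_suspend(struct device *dev);         /* hypothetical helper */

static int foo_suspend(struct device *dev)
{
        int error = foo_do_suspend(dev);

        /* On failure, logs roughly "foo_suspend(): foo_do_suspend returns <err>". */
        suspend_report_result(dev, foo_do_suspend, error);

        return error;
}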
/**
* device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
* @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
*/
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
@@ -2108,7 +2344,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
void device_pm_check_callbacks(struct device *dev)
{
- spin_lock_irq(&dev->power.lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
@@ -2117,11 +2355,10 @@ void device_pm_check_callbacks(struct device *dev)
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
- spin_unlock_irq(&dev->power.lock);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
}
-bool dev_pm_smart_suspend_and_suspended(struct device *dev)
+bool dev_pm_skip_suspend(struct device *dev)
{
- return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- pm_runtime_status_suspended(dev);
+ return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
}
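The renamed dev_pm_skip_suspend() returns true when "smart suspend" is in effect for this transition and the device is already runtime-suspended. Subsystems typically use it in their late/noirq suspend callbacks to skip redundant work; a sketch with hypothetical foo_ names:

#include <linux/pm_runtime.h>

int foo_power_down(struct device *dev);         /* hypothetical helper */

static int foo_suspend_late(struct device *dev)
{
        /*
         * If the device was left runtime-suspended under "smart suspend",
         * its state is already appropriate and nothing needs to be done at
         * this stage.
         */
        if (dev_pm_skip_suspend(dev))
                return 0;

        return foo_power_down(dev);
}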