Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/common.c   203
-rw-r--r--  drivers/base/power/main.c     317
-rw-r--r--  drivers/base/power/qos.c        1
-rw-r--r--  drivers/base/power/runtime.c   36
-rw-r--r--  drivers/base/power/sysfs.c     18
-rw-r--r--  drivers/base/power/wakeirq.c   30
-rw-r--r--  drivers/base/power/wakeup.c    11
7 files changed, 435 insertions(+), 181 deletions(-)
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 44ec20918a4d..781968a128ff 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -11,6 +11,7 @@
#include <linux/pm_clock.h>
#include <linux/acpi.h>
#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
#include "power.h"
@@ -168,6 +169,179 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
/**
+ * dev_pm_domain_attach_list - Associate a device with its PM domains.
+ * @dev: The device to look up the PM domains for.
+ * @data: The data used for attaching to the PM domains.
+ * @list: An out-parameter with an allocated list of attached PM domains.
+ *
+ * This function helps to attach a device to its multiple PM domains. The
+ * caller, which is typically a driver's probe function, may provide a list of
+ * names for the PM domains that we should try to attach the device to, but it
+ * may also provide an empty list, in which case the attach is done for all of
+ * the available PM domains.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns the number of attached PM domains or a negative error code in case of
+ * a failure. Note that, to detach the list of PM domains, the driver shall call
+ * dev_pm_domain_detach_list(), typically during the remove phase.
+ */
+int dev_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ struct device_node *np = dev->of_node;
+ struct dev_pm_domain_list *pds;
+ struct device *pd_dev = NULL;
+ int ret, i, num_pds = 0;
+ bool by_id = true;
+ size_t size;
+ u32 pd_flags = data ? data->pd_flags : 0;
+ u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 :
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
+
+ if (dev->pm_domain)
+ return -EEXIST;
+
+ /* For now this is limited to OF based platforms. */
+ if (!np)
+ return 0;
+
+ if (data && data->pd_names) {
+ num_pds = data->num_pd_names;
+ by_id = false;
+ } else {
+ num_pds = of_count_phandle_with_args(np, "power-domains",
+ "#power-domain-cells");
+ }
+
+ if (num_pds <= 0)
+ return 0;
+
+ pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return -ENOMEM;
+
+ size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links) +
+ sizeof(*pds->opp_tokens);
+ pds->pd_devs = kcalloc(num_pds, size, GFP_KERNEL);
+ if (!pds->pd_devs) {
+ ret = -ENOMEM;
+ goto free_pds;
+ }
+ pds->pd_links = (void *)(pds->pd_devs + num_pds);
+ pds->opp_tokens = (void *)(pds->pd_links + num_pds);
+
+ if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON)
+ link_flags |= DL_FLAG_RPM_ACTIVE;
+
+ for (i = 0; i < num_pds; i++) {
+ if (by_id)
+ pd_dev = dev_pm_domain_attach_by_id(dev, i);
+ else
+ pd_dev = dev_pm_domain_attach_by_name(dev,
+ data->pd_names[i]);
+ if (IS_ERR_OR_NULL(pd_dev)) {
+ ret = pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
+ goto err_attach;
+ }
+
+ if (pd_flags & PD_FLAG_REQUIRED_OPP) {
+ struct dev_pm_opp_config config = {
+ .required_dev = pd_dev,
+ .required_dev_index = i,
+ };
+
+ ret = dev_pm_opp_set_config(dev, &config);
+ if (ret < 0)
+ goto err_link;
+
+ pds->opp_tokens[i] = ret;
+ }
+
+ if (link_flags) {
+ struct device_link *link;
+
+ link = device_link_add(dev, pd_dev, link_flags);
+ if (!link) {
+ ret = -ENODEV;
+ goto err_link;
+ }
+
+ pds->pd_links[i] = link;
+ }
+
+ pds->pd_devs[i] = pd_dev;
+ }
+
+ pds->num_pds = num_pds;
+ *list = pds;
+ return num_pds;
+
+err_link:
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
+ dev_pm_domain_detach(pd_dev, true);
+err_attach:
+ while (--i >= 0) {
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
+ if (pds->pd_links[i])
+ device_link_del(pds->pd_links[i]);
+ dev_pm_domain_detach(pds->pd_devs[i], true);
+ }
+ kfree(pds->pd_devs);
+free_pds:
+ kfree(pds);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
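
A minimal caller sketch (hypothetical "foo" driver; the domain names "perf" and
"mem" are assumptions, not taken from this patch):

    #include <linux/platform_device.h>
    #include <linux/pm_domain.h>

    struct foo { struct dev_pm_domain_list *pd_list; };

    static int foo_probe(struct platform_device *pdev)
    {
            static const char * const pd_names[] = { "perf", "mem" };
            struct dev_pm_domain_attach_data pd_data = {
                    .pd_names = pd_names,
                    .num_pd_names = ARRAY_SIZE(pd_names),
                    .pd_flags = PD_FLAG_DEV_LINK_ON,
            };
            struct foo *foo;
            int ret;

            foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
            if (!foo)
                    return -ENOMEM;

            /* Returns the number of attached PM domains, 0 or an error. */
            ret = dev_pm_domain_attach_list(&pdev->dev, &pd_data, &foo->pd_list);
            if (ret < 0)
                    return ret;

            platform_set_drvdata(pdev, foo);
            return 0;
    }

    static void foo_remove(struct platform_device *pdev)
    {
            struct foo *foo = platform_get_drvdata(pdev);

            /* Reverses dev_pm_domain_attach_list(); NULL list is a no-op. */
            dev_pm_domain_detach_list(foo->pd_list);
    }
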
+
+/**
+ * devm_pm_domain_detach_list - devres action to detach a list of PM domains.
+ * @_list: The list of PM domains to detach.
+ *
+ * This function reverses the actions of devm_pm_domain_attach_list(). It is
+ * invoked implicitly during the remove phase if the driver used
+ * devm_pm_domain_attach_list() to attach the PM domains.
+ */
+static void devm_pm_domain_detach_list(void *_list)
+{
+ struct dev_pm_domain_list *list = _list;
+
+ dev_pm_domain_detach_list(list);
+}
+
+/**
+ * devm_pm_domain_attach_list - devres-enabled version of dev_pm_domain_attach_list
+ * @dev: The device to look up the PM domains for.
+ * @data: The data used for attaching to the PM domains.
+ * @list: An out-parameter with an allocated list of attached PM domains.
+ *
+ * NOTE: this will also handle calling devm_pm_domain_detach_list() for
+ * you during the remove phase.
+ *
+ * Returns the number of attached PM domains or a negative error code in case of
+ * a failure.
+ */
+int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ int ret, num_pds;
+
+ num_pds = dev_pm_domain_attach_list(dev, data, list);
+ if (num_pds <= 0)
+ return num_pds;
+
+ ret = devm_add_action_or_reset(dev, devm_pm_domain_detach_list, *list);
+ if (ret)
+ return ret;
+
+ return num_pds;
+}
+EXPORT_SYMBOL_GPL(devm_pm_domain_attach_list);
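
With the devres variant the remove-phase call goes away; a sketch under the
same assumptions as above:

    static int foo_probe(struct platform_device *pdev)
    {
            struct dev_pm_domain_list *pd_list;
            int ret;

            /* NULL data: attach to every PM domain in the device's OF node. */
            ret = devm_pm_domain_attach_list(&pdev->dev, NULL, &pd_list);
            if (ret < 0)
                    return ret;

            /* Detach happens automatically via devres on driver unbind. */
            return 0;
    }
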
+
+/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
@@ -188,6 +362,35 @@ void dev_pm_domain_detach(struct device *dev, bool power_off)
EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
/**
+ * dev_pm_domain_detach_list - Detach a list of PM domains.
+ * @list: The list of PM domains to detach.
+ *
+ * This function reverses the actions of dev_pm_domain_attach_list().
+ * Typically it should be invoked by drivers during the remove phase.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void dev_pm_domain_detach_list(struct dev_pm_domain_list *list)
+{
+ int i;
+
+ if (!list)
+ return;
+
+ for (i = 0; i < list->num_pds; i++) {
+ dev_pm_opp_clear_config(list->opp_tokens[i]);
+ if (list->pd_links[i])
+ device_link_del(list->pd_links[i]);
+ dev_pm_domain_detach(list->pd_devs[i], true);
+ }
+
+ kfree(list->pd_devs);
+ kfree(list);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list);
+
+/**
* dev_pm_domain_start - Start the device through its PM domain.
* @dev: Device to start.
*
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index fadcd0379dc2..40e1d8d8a589 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -60,7 +60,6 @@ static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
-struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
@@ -209,7 +208,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb)
if (!pm_print_times_enabled)
return 0;
- dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
+ dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
@@ -224,7 +223,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
return;
rettime = ktime_get();
- dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
+ dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
(unsigned long long)ktime_us_delta(rettime, calltime));
}
@@ -497,6 +496,7 @@ struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
+ bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
@@ -513,11 +513,23 @@ struct dpm_watchdog {
static void dpm_watchdog_handler(struct timer_list *t)
{
struct dpm_watchdog *wd = from_timer(wd, t, timer);
+ struct timer_list *timer = &wd->timer;
+ unsigned int time_left;
+
+ if (wd->fatal) {
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+ }
+
+ time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
+ dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
+ CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
+ show_stack(wd->tsk, NULL, KERN_WARNING);
- dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL, KERN_EMERG);
- panic("%s %s: unrecoverable failure\n",
- dev_driver_string(wd->dev), dev_name(wd->dev));
+ wd->fatal = true;
+ mod_timer(timer, jiffies + HZ * time_left);
}
/**
@@ -531,10 +543,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
+ wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
- timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
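
To make the two-stage behavior concrete (Kconfig values assumed for
illustration): with CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT=60 and
CONFIG_DPM_WATCHDOG_TIMEOUT=120, the timer first fires 60 seconds into a stuck
suspend/resume callback, logs the warning plus a stack trace, and rearms for
the remaining 60 seconds; it panics only if the callback still has not
completed by then. If the two symbols are equal, wd->fatal starts out true and
the first expiry panics, matching the old behavior.
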
@@ -578,6 +591,35 @@ bool dev_pm_skip_resume(struct device *dev)
return !dev->power.must_resume;
}
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (is_async(dev)) {
+ dev->power.async_in_progress = true;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
+
+ put_device(dev);
+ }
+ /*
+ * Because async_schedule_dev_nocall() above has returned false or it
+ * has not been called at all, func() is not running and it is safe to
+ * update the async_in_progress flag without extra synchronization.
+ */
+ dev->power.async_in_progress = false;
+ return false;
+}
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -614,13 +656,15 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
- * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
- * to avoid confusing drivers that don't use it.
+ * status to "active" unless its power.set_active flag is clear, in
+ * which case it is not necessary to update its PM-runtime status.
*/
- if (skip_resume)
+ if (skip_resume) {
pm_runtime_set_suspended(dev);
- else if (dev_pm_skip_suspend(dev))
+ } else if (dev->power.set_active) {
pm_runtime_set_active(dev);
+ dev->power.set_active = false;
+ }
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -657,42 +701,12 @@ Out:
TRACE_RESUME(error);
if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
-}
-
-static bool dpm_async_fn(struct device *dev, async_func_t func)
-{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- dev->power.async_in_progress = true;
-
- get_device(dev);
-
- if (async_schedule_dev_nocall(func, dev))
- return true;
-
- put_device(dev);
- }
- /*
- * Because async_schedule_dev_nocall() above has returned false or it
- * has not been called at all, func() is not running and it is safe to
- * update the async_in_progress flag without extra synchronization.
- */
- dev->power.async_in_progress = false;
- return false;
-}
-
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
@@ -707,9 +721,12 @@ static void dpm_noirq_resume_devices(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -736,6 +753,9 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
@@ -817,8 +837,7 @@ Out:
complete_all(&dev->power.completion);
if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
@@ -842,9 +861,12 @@ void dpm_resume_early(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -871,6 +893,9 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@@ -905,7 +930,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
if (dev->power.direct_complete) {
- /* Match the pm_runtime_disable() in __device_suspend(). */
+ /* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
@@ -974,8 +999,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
if (error) {
- suspend_stats.failed_resume++;
- dpm_save_failed_step(SUSPEND_RESUME);
+ async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
@@ -1004,10 +1028,11 @@ void dpm_resume(pm_message_t state)
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
might_sleep();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@@ -1017,29 +1042,25 @@ void dpm_resume(pm_message_t state)
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
-
- get_device(dev);
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
if (!dev->power.async_in_progress) {
+ get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
device_resume(dev, state, false);
+ put_device(dev);
+
mutex_lock(&dpm_list_mtx);
}
-
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &dpm_prepared_list);
-
- mutex_unlock(&dpm_list_mtx);
-
- put_device(dev);
-
- mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
+ if (async_error)
+ dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
devfreq_resume();
@@ -1187,7 +1208,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
}
/**
- * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
+ * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1195,7 +1216,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1240,6 +1261,8 @@ Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
}
@@ -1257,8 +1280,14 @@ Skip:
dev->power.may_skip_resume))
dev->power.must_resume = true;
- if (dev->power.must_resume)
+ if (dev->power.must_resume) {
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
+ dev->power.set_active = true;
+ if (dev->parent && !dev->parent->power.ignore_children)
+ dev->parent->power.set_active = true;
+ }
dpm_superior_set_must_resume(dev);
+ }
Complete:
complete_all(&dev->power.completion);
@@ -1269,54 +1298,37 @@ Complete:
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = __device_suspend_noirq(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend_noirq(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend_noirq(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend_noirq))
- return 0;
-
- return __device_suspend_noirq(dev, pm_transition, false);
-}
-
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
- get_device(dev);
- mutex_unlock(&dpm_list_mtx);
-
- error = device_suspend_noirq(dev);
+ list_move(&dev->power.entry, &dpm_noirq_list);
- mutex_lock(&dpm_list_mtx);
+ if (dpm_async_fn(dev, async_suspend_noirq))
+ continue;
- if (error) {
- pm_dev_err(dev, state, " noirq", error);
- dpm_save_failed_dev(dev_name(dev));
- } else if (!list_empty(&dev->power.entry)) {
- list_move(&dev->power.entry, &dpm_noirq_list);
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend_noirq(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1324,15 +1336,16 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
- if (error) {
- suspend_stats.failed_suspend_noirq++;
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
- }
+
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
@@ -1375,14 +1388,14 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
}
/**
- * __device_suspend_late - Execute a "late suspend" callback for given device.
+ * device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1434,6 +1447,8 @@ Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async late" : " late", error);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@@ -1450,24 +1465,11 @@ Complete:
static void async_suspend_late(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
- error = __device_suspend_late(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend_late(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend_late(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend_late))
- return 0;
-
- return __device_suspend_late(dev, pm_transition, false);
-}
-
/**
* dpm_suspend_late - Execute "late suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -1478,32 +1480,28 @@ int dpm_suspend_late(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
- wake_up_all_idle_cpus();
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
- while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
-
- get_device(dev);
+ wake_up_all_idle_cpus();
- mutex_unlock(&dpm_list_mtx);
+ mutex_lock(&dpm_list_mtx);
- error = device_suspend_late(dev);
+ while (!list_empty(&dpm_suspended_list)) {
+ struct device *dev = to_device(dpm_suspended_list.prev);
- mutex_lock(&dpm_list_mtx);
+ list_move(&dev->power.entry, &dpm_late_early_list);
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_late_early_list);
+ if (dpm_async_fn(dev, async_suspend_late))
+ continue;
- if (error) {
- pm_dev_err(dev, state, " late", error);
- dpm_save_failed_dev(dev_name(dev));
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend_late(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1511,12 +1509,14 @@ int dpm_suspend_late(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
+
if (error) {
- suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
}
@@ -1597,12 +1597,12 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
}
/**
- * __device_suspend - Execute "suspend" callbacks for given device.
+ * device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
-static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+static int device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1716,8 +1716,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_watchdog_clear(&wd);
Complete:
- if (error)
+ if (error) {
async_error = error;
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
@@ -1727,25 +1730,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
static void async_suspend(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- int error;
-
- error = __device_suspend(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ device_suspend(dev, pm_transition, true);
put_device(dev);
}
-static int device_suspend(struct device *dev)
-{
- if (dpm_async_fn(dev, async_suspend))
- return 0;
-
- return __device_suspend(dev, pm_transition, false);
-}
-
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -1761,29 +1750,25 @@ int dpm_suspend(pm_message_t state)
devfreq_suspend();
cpufreq_suspend();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
- while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
- get_device(dev);
+ mutex_lock(&dpm_list_mtx);
- mutex_unlock(&dpm_list_mtx);
+ while (!list_empty(&dpm_prepared_list)) {
+ struct device *dev = to_device(dpm_prepared_list.prev);
- error = device_suspend(dev);
+ list_move(&dev->power.entry, &dpm_suspended_list);
- mutex_lock(&dpm_list_mtx);
+ if (dpm_async_fn(dev, async_suspend))
+ continue;
- if (error) {
- pm_dev_err(dev, state, "", error);
- dpm_save_failed_dev(dev_name(dev));
- } else if (!list_empty(&dev->power.entry)) {
- list_move(&dev->power.entry, &dpm_suspended_list);
- }
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+ error = device_suspend(dev, state, false);
+
put_device(dev);
mutex_lock(&dpm_list_mtx);
@@ -1791,14 +1776,16 @@ int dpm_suspend(pm_message_t state)
if (error || async_error)
break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
if (!error)
error = async_error;
- if (error) {
- suspend_stats.failed_suspend++;
+
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
- }
+
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
@@ -1949,11 +1936,11 @@ int dpm_suspend_start(pm_message_t state)
int error;
error = dpm_prepare(state);
- if (error) {
- suspend_stats.failed_prepare++;
+ if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
- } else
+ else
error = dpm_suspend(state);
+
dpm_show_time(starttime, state, error, "start");
return error;
}
@@ -1962,7 +1949,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index bd77f6734f14..ff393cba7649 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -137,6 +137,7 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
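
With the symbol exported, modular code can read the effective constraint; a
minimal sketch (FOO_OFF_LATENCY_US is a made-up, driver-specific threshold):

    #include <linux/pm_qos.h>

    #define FOO_OFF_LATENCY_US	500	/* assumed */

    static bool foo_may_power_off(struct device *dev)
    {
            s32 latency = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);

            /* Power off only if no constraint, or the constraint allows it. */
            return latency == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT ||
                   latency >= FOO_OFF_LATENCY_US;
    }
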
/**
* apply_constraint - Add/modify/remove device PM QoS request.
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 05793c9fbb84..2ee45841486b 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -94,6 +94,7 @@ static void update_pm_runtime_accounting(struct device *dev)
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
update_pm_runtime_accounting(dev);
+ trace_rpm_status(dev, status);
dev->power.runtime_status = status;
}
@@ -1176,7 +1177,7 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * pm_runtime_get_if_active - Conditionally bump up device usage counter.
+ * pm_runtime_get_conditional - Conditionally bump up device usage counter.
* @dev: Device to handle.
* @ign_usage_count: Whether or not to look at the current usage counter value.
*
@@ -1197,7 +1198,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
* The caller is responsible for decrementing the runtime PM usage counter of
* @dev after this function has returned a positive value for it.
*/
-int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
+static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
@@ -1218,9 +1219,40 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
return retval;
}
+
+/**
+ * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
+ * in active state
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE, in which case 1 is returned. If the device is in a different
+ * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
+ * device, in which case the usage counter is left unmodified as well.
+ */
+int pm_runtime_get_if_active(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, true);
+}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
/**
+ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which
+ * case 1 is returned. If the device is in a different state or its usage
+ * counter is 0, 0 is returned. -EINVAL is returned if runtime PM is disabled
+ * for the device, in which case the usage counter is left unmodified as well.
+ */
+int pm_runtime_get_if_in_use(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, false);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
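
A typical caller is a context that must not wake a suspended device, such as
an interrupt handler (hypothetical sketch):

    static irqreturn_t foo_irq(int irq, void *data)
    {
            struct device *dev = data;

            /*
             * Bumps the usage counter only if the device is RPM_ACTIVE and
             * already in use; never resumes a suspended device.
             */
            if (pm_runtime_get_if_in_use(dev) <= 0)
                    return IRQ_NONE;

            /* ... access the hardware ... */

            pm_runtime_put(dev);
            return IRQ_HANDLED;
    }

Use pm_runtime_get_if_active(dev) instead when the usage counter should be
ignored and RPM_ACTIVE status alone is sufficient.
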
+
+/**
* __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
* @status: New runtime PM status of the device.
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a1474fb67db9..f84018125b46 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
@@ -509,14 +508,6 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
return sysfs_emit(buf, "%lld\n", msec);
}
-static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
- kgid_t kgid)
-{
- if (dev->power.wakeup && dev->power.wakeup->dev)
- return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
- return 0;
-}
-
static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
@@ -541,6 +532,15 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
+
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ if (dev->power.wakeup && dev->power.wakeup->dev)
+ return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
+ return 0;
+}
+
#else /* CONFIG_PM_SLEEP */
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
kgid_t kgid)
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 42171f766dcb..8aa28c08b289 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -103,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+static void devm_pm_clear_wake_irq(void *dev)
+{
+ dev_pm_clear_wake_irq(dev);
+}
+
+/**
+ * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ, same as dev_pm_set_wake_irq(),
+ * but the wake IRQ is automatically cleared on driver detach.
+ */
+int devm_pm_set_wake_irq(struct device *dev, int irq)
+{
+ int ret;
+
+ ret = dev_pm_set_wake_irq(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
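
A probe-time sketch (hypothetical driver; IRQ index 0 assumed):

    static int foo_probe(struct platform_device *pdev)
    {
            int irq, ret;

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;

            ret = device_init_wakeup(&pdev->dev, true);
            if (ret)
                    return ret;

            /* Cleared automatically on driver detach via devres. */
            return devm_pm_set_wake_irq(&pdev->dev, irq);
    }
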
+
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt
@@ -313,8 +339,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
- wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
}
/**
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a917219feea6..752b417e8129 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -451,16 +451,15 @@ static struct wakeup_source *device_wakeup_detach(struct device *dev)
* Detach the @dev's wakeup source object from it, unregister this wakeup source
* object and destroy it.
*/
-int device_wakeup_disable(struct device *dev)
+void device_wakeup_disable(struct device *dev)
{
struct wakeup_source *ws;
if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
+ return;
ws = device_wakeup_detach(dev);
wakeup_source_unregister(ws);
- return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);
@@ -502,7 +501,11 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+ if (enable)
+ return device_wakeup_enable(dev);
+
+ device_wakeup_disable(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
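
Caller-side effect of the void conversion (sketch): error handling on the
disable path simply disappears, as in device_set_wakeup_enable() above.

    static void foo_shutdown_wakeup(struct device *dev)
    {
            /* No return value to check as of this change. */
            device_wakeup_disable(dev);
    }
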