Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/Makefile          |    5
-rw-r--r--  drivers/base/power/clock_ops.c       |  311
-rw-r--r--  drivers/base/power/common.c          |  267
-rw-r--r--  drivers/base/power/domain.c          | 2990
-rw-r--r--  drivers/base/power/domain_governor.c |  259
-rw-r--r--  drivers/base/power/generic_ops.c     |  113
-rw-r--r--  drivers/base/power/main.c            | 1427
-rw-r--r--  drivers/base/power/power.h           |   24
-rw-r--r--  drivers/base/power/qos-test.c        |  117
-rw-r--r--  drivers/base/power/qos.c             |  147
-rw-r--r--  drivers/base/power/runtime-test.c    |  249
-rw-r--r--  drivers/base/power/runtime.c         |  976
-rw-r--r--  drivers/base/power/sysfs.c           |  270
-rw-r--r--  drivers/base/power/trace.c           |   26
-rw-r--r--  drivers/base/power/wakeirq.c         |  178
-rw-r--r--  drivers/base/power/wakeup.c          |  327
-rw-r--r--  drivers/base/power/wakeup_stats.c    |  219
17 files changed, 3232 insertions(+), 4673 deletions(-)
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index e1bb691cf8f1..2989e42d0161 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
-obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
+obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
+obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
+obj-$(CONFIG_PM_RUNTIME_KUNIT_TEST) += runtime-test.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 5a42ae4078c2..b69bcb37c830 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
*
* Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
#include <linux/kernel.h>
@@ -13,6 +12,7 @@
#include <linux/pm_clock.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/of_clk.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pm_domain.h>
@@ -23,6 +23,7 @@
enum pce_status {
PCE_STATUS_NONE = 0,
PCE_STATUS_ACQUIRED,
+ PCE_STATUS_PREPARED,
PCE_STATUS_ENABLED,
PCE_STATUS_ERROR,
};
@@ -32,10 +33,114 @@ struct pm_clock_entry {
char *con_id;
struct clk *clk;
enum pce_status status;
+ bool enabled_when_prepared;
};
/**
- * pm_clk_enable - Enable a clock, reporting any errors
+ * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
+ * entry list.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ * and clock_op_might_sleep count to be modified.
+ *
+ * Get exclusive access before modifying the PM clock entry list and the
+ * clock_op_might_sleep count to guard against concurrent modifications.
+ * This also protects against concurrent use of clock_op_might_sleep and the
+ * PM clock entry list by pm_clk_suspend()/pm_clk_resume(), which may or may
+ * not run in atomic context, hence both the mutex and the spinlock must be
+ * taken here.
+ */
+static void pm_clk_list_lock(struct pm_subsys_data *psd)
+ __acquires(&psd->lock)
+{
+ mutex_lock(&psd->clock_mutex);
+ spin_lock_irq(&psd->lock);
+}
+
+/**
+ * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_list_lock().
+ */
+static void pm_clk_list_unlock(struct pm_subsys_data *psd)
+ __releases(&psd->lock)
+{
+ spin_unlock_irq(&psd->lock);
+ mutex_unlock(&psd->clock_mutex);
+}
+
+/**
+ * pm_clk_op_lock - ensure exclusive access for performing clock operations.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ * and clock_op_might_sleep count being used.
+ * @flags: stored irq flags.
+ * @fn: string for the caller function's name.
+ *
+ * This is used by pm_clk_suspend() and pm_clk_resume() to guard
+ * against concurrent modifications to the clock entry list and the
+ * clock_op_might_sleep count. If clock_op_might_sleep != 0, only the
+ * mutex can be taken and those functions may only be used in non-atomic
+ * context. If clock_op_might_sleep == 0, these functions may be used in
+ * any context and only the spinlock can be taken.
+ * Returns -EPERM if called in atomic context when clock ops might sleep.
+ */
+static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
+ const char *fn)
+ /* sparse annotations don't work here as exit state isn't static */
+{
+ bool atomic_context = in_atomic() || irqs_disabled();
+
+try_again:
+ spin_lock_irqsave(&psd->lock, *flags);
+ if (!psd->clock_op_might_sleep) {
+ /* the __release is there to work around sparse limitations */
+ __release(&psd->lock);
+ return 0;
+ }
+
+ /* bail out if in atomic context */
+ if (atomic_context) {
+ pr_err("%s: atomic context with clock_ops_might_sleep = %d",
+ fn, psd->clock_op_might_sleep);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ might_sleep();
+ return -EPERM;
+ }
+
+ /* we must switch to the mutex */
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ mutex_lock(&psd->clock_mutex);
+
+ /*
+ * There was a possibility for psd->clock_op_might_sleep
+ * to become 0 above. Keep the mutex only if not the case.
+ */
+ if (likely(psd->clock_op_might_sleep))
+ return 0;
+
+ mutex_unlock(&psd->clock_mutex);
+ goto try_again;
+}
+
+/**
+ * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_op_lock().
+ * @flags: irq flags provided by pm_clk_op_lock().
+ */
+static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
+ /* sparse annotations don't work here as entry state isn't static */
+{
+ if (psd->clock_op_might_sleep) {
+ mutex_unlock(&psd->clock_mutex);
+ } else {
+ /* the __acquire is there to work around sparse limitations */
+ __acquire(&psd->lock);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ }
+}
+
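To make the locking contract above concrete, here is a minimal sketch of how pm_clk_suspend()/pm_clk_resume() further below use this pair; the function example_clk_op() is hypothetical and only illustrates the pattern:

    static int example_clk_op(struct device *dev)
    {
            struct pm_subsys_data *psd = dev_to_psd(dev);
            unsigned long flags;
            int ret;

            /* Takes the mutex when some clock op might sleep, else the spinlock. */
            ret = pm_clk_op_lock(psd, &flags, __func__);
            if (ret)
                    return ret;     /* atomic context, but a clock op might sleep */

            /* ... walk psd->clock_list and enable/disable clocks here ... */

            pm_clk_op_unlock(psd, &flags);
            return 0;
    }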
+/**
+ * __pm_clk_enable - Enable a clock, reporting any errors
* @dev: The device for the given clock
* @ce: PM clock entry corresponding to the clock.
*/
@@ -43,14 +148,21 @@ static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce
{
int ret;
- if (ce->status < PCE_STATUS_ERROR) {
+ switch (ce->status) {
+ case PCE_STATUS_ACQUIRED:
+ ret = clk_prepare_enable(ce->clk);
+ break;
+ case PCE_STATUS_PREPARED:
ret = clk_enable(ce->clk);
- if (!ret)
- ce->status = PCE_STATUS_ENABLED;
- else
- dev_err(dev, "%s: failed to enable clk %p, error %d\n",
- __func__, ce->clk, ret);
+ break;
+ default:
+ return;
}
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
+ else
+ dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ __func__, ce->clk, ret);
}
/**
@@ -64,12 +176,20 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
ce->clk = clk_get(dev, ce->con_id);
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
- } else {
- clk_prepare(ce->clk);
+ return;
+ } else if (clk_is_enabled_when_prepared(ce->clk)) {
+ /* we defer preparing the clock in that case */
ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
- ce->clk, ce->con_id);
+ ce->enabled_when_prepared = true;
+ } else if (clk_prepare(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ dev_err(dev, "clk_prepare() failed\n");
+ return;
+ } else {
+ ce->status = PCE_STATUS_PREPARED;
}
+ dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
}
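Taken together, pm_clk_acquire() above and the enable/suspend/remove paths below implement a small per-clock state machine. A summary derived from the hunks in this patch (for orientation only, not part of the file):

    /*
     * PCE state transitions implied by this patch:
     *
     *   clk_get() fails                    -> PCE_STATUS_ERROR
     *   clock is enabled-when-prepared     -> PCE_STATUS_ACQUIRED (prepare deferred)
     *   clk_prepare() succeeds             -> PCE_STATUS_PREPARED
     *   ACQUIRED + clk_prepare_enable()    -> PCE_STATUS_ENABLED
     *   PREPARED + clk_enable()            -> PCE_STATUS_ENABLED
     *   ENABLED + clk_disable_unprepare()  -> PCE_STATUS_ACQUIRED (suspend)
     *   ENABLED + clk_disable()            -> PCE_STATUS_PREPARED (suspend)
     */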
static int __pm_clk_add(struct device *dev, const char *con_id,
@@ -88,8 +208,6 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
if (con_id) {
ce->con_id = kstrdup(con_id, GFP_KERNEL);
if (!ce->con_id) {
- dev_err(dev,
- "Not enough memory for clock connection ID.\n");
kfree(ce);
return -ENOMEM;
}
@@ -103,9 +221,11 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
pm_clk_acquire(dev, ce);
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_add_tail(&ce->node, &psd->clock_list);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep++;
+ pm_clk_list_unlock(psd);
return 0;
}
@@ -139,39 +259,6 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
-
-/**
- * of_pm_clk_add_clk - Start using a device clock for power management.
- * @dev: Device whose clock is going to be used for power management.
- * @name: Name of clock that is going to be used for power management.
- *
- * Add the clock described in the 'clocks' device-tree node that matches
- * with the 'name' provided, to the list of clocks used for the power
- * management of @dev. On success, returns 0. Returns a negative error
- * code if the clock is not found or cannot be added.
- */
-int of_pm_clk_add_clk(struct device *dev, const char *name)
-{
- struct clk *clk;
- int ret;
-
- if (!dev || !dev->of_node || !name)
- return -EINVAL;
-
- clk = of_clk_get_by_name(dev->of_node, name);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- ret = pm_clk_add_clk(dev, clk);
- if (ret) {
- clk_put(clk);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
-
/**
* of_pm_clk_add_clks - Start using device clock(s) for power management.
* @dev: Device whose clock(s) is going to be used for power management.
@@ -191,8 +278,7 @@ int of_pm_clk_add_clks(struct device *dev)
if (!dev || !dev->of_node)
return -EINVAL;
- count = of_count_phandle_with_args(dev->of_node, "clocks",
- "#clock-cells");
+ count = of_clk_get_parent_count(dev->of_node);
if (count <= 0)
return -ENODEV;
@@ -237,14 +323,20 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (!ce)
return;
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
- clk_disable(ce->clk);
-
- if (ce->status >= PCE_STATUS_ACQUIRED) {
- clk_unprepare(ce->clk);
+ switch (ce->status) {
+ case PCE_STATUS_ENABLED:
+ clk_disable(ce->clk);
+ fallthrough;
+ case PCE_STATUS_PREPARED:
+ clk_unprepare(ce->clk);
+ fallthrough;
+ case PCE_STATUS_ACQUIRED:
+ case PCE_STATUS_ERROR:
+ if (!IS_ERR(ce->clk))
clk_put(ce->clk);
- }
+ break;
+ default:
+ break;
}
kfree(ce->con_id);
@@ -252,44 +344,6 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
}
/**
- * pm_clk_remove - Stop using a device clock for power management.
- * @dev: Device whose clock should not be used for PM any more.
- * @con_id: Connection ID of the clock.
- *
- * Remove the clock represented by @con_id from the list of clocks used for
- * the power management of @dev.
- */
-void pm_clk_remove(struct device *dev, const char *con_id)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
-
- if (!psd)
- return;
-
- spin_lock_irq(&psd->lock);
-
- list_for_each_entry(ce, &psd->clock_list, node) {
- if (!con_id && !ce->con_id)
- goto remove;
- else if (!con_id || !ce->con_id)
- continue;
- else if (!strcmp(con_id, ce->con_id))
- goto remove;
- }
-
- spin_unlock_irq(&psd->lock);
- return;
-
- remove:
- list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
-
- __pm_clk_remove(ce);
-}
-EXPORT_SYMBOL_GPL(pm_clk_remove);
-
-/**
* pm_clk_remove_clk - Stop using a device clock for power management.
* @dev: Device whose clock should not be used for PM any more.
* @clk: Clock pointer
@@ -305,19 +359,21 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk)
if (!psd || !clk)
return;
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (clk == ce->clk)
goto remove;
}
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
@@ -328,13 +384,16 @@ EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
* @dev: Device to initialize the list of PM clocks for.
*
* Initialize the lock and clock_list members of the device's pm_subsys_data
- * object.
+ * object, and set the count of clocks that might sleep to 0.
*/
void pm_clk_init(struct device *dev)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
- if (psd)
+ if (psd) {
INIT_LIST_HEAD(&psd->clock_list);
+ mutex_init(&psd->clock_mutex);
+ psd->clock_op_might_sleep = 0;
+ }
}
EXPORT_SYMBOL_GPL(pm_clk_init);
@@ -370,12 +429,13 @@ void pm_clk_destroy(struct device *dev)
INIT_LIST_HEAD(&list);
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
list_move(&ce->node, &list);
+ psd->clock_op_might_sleep = 0;
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
dev_pm_put_subsys_data(dev);
@@ -386,6 +446,23 @@ void pm_clk_destroy(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_clk_destroy);
+static void pm_clk_destroy_action(void *data)
+{
+ pm_clk_destroy(data);
+}
+
+int devm_pm_clk_create(struct device *dev)
+{
+ int ret;
+
+ ret = pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, pm_clk_destroy_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_clk_create);
+
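A minimal probe sketch for the new managed helper (hypothetical driver code, assuming the usual linux/pm_clock.h and linux/pm_runtime.h includes; runtime PM teardown omitted for brevity):

    static int example_probe(struct platform_device *pdev)
    {
            int ret;

            /* Create the PM clock list; devres destroys it on driver removal. */
            ret = devm_pm_clk_create(&pdev->dev);
            if (ret)
                    return ret;

            /* Add the default (NULL con_id) clock to the list. */
            ret = pm_clk_add(&pdev->dev, NULL);
            if (ret)
                    return ret;

            pm_runtime_enable(&pdev->dev);
            return 0;
    }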
/**
* pm_clk_suspend - Disable clocks in a device's PM clock list.
* @dev: Device to disable the clocks for.
@@ -395,23 +472,30 @@ int pm_clk_suspend(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
- spin_lock_irqsave(&psd->lock, flags);
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
list_for_each_entry_reverse(ce, &psd->clock_list, node) {
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
+ if (ce->status == PCE_STATUS_ENABLED) {
+ if (ce->enabled_when_prepared) {
+ clk_disable_unprepare(ce->clk);
+ ce->status = PCE_STATUS_ACQUIRED;
+ } else {
clk_disable(ce->clk);
- ce->status = PCE_STATUS_ACQUIRED;
+ ce->status = PCE_STATUS_PREPARED;
+ }
}
}
- spin_unlock_irqrestore(&psd->lock, flags);
+ pm_clk_op_unlock(psd, &flags);
return 0;
}
@@ -426,18 +510,21 @@ int pm_clk_resume(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
- spin_lock_irqsave(&psd->lock, flags);
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
list_for_each_entry(ce, &psd->clock_list, node)
__pm_clk_enable(dev, ce);
- spin_unlock_irqrestore(&psd->lock, flags);
+ pm_clk_op_unlock(psd, &flags);
return 0;
}
@@ -633,7 +720,7 @@ static int pm_clk_notify(struct notifier_block *nb,
* the remaining members of @clknb should be populated prior to calling this
* routine.
*/
-void pm_clk_add_notifier(struct bus_type *bus,
+void pm_clk_add_notifier(const struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
if (!bus || !clknb)
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index b413951c6abc..6ecf9ce4a4e6 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/common.c - Common device power management code.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/export.h>
@@ -13,6 +11,7 @@
#include <linux/pm_clock.h>
#include <linux/acpi.h>
#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
#include "power.h"
@@ -84,7 +83,7 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
/**
* dev_pm_domain_attach - Attach a device to its PM domain.
* @dev: Device to attach.
- * @power_on: Used to indicate whether we should power on the device.
+ * @flags: indicate whether we should power on/off the device on attach/detach
*
* The @dev may only be attached to a single PM domain. By iterating through
* the available alternatives we try to find a valid PM domain for the device.
@@ -101,17 +100,20 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
* Returns 0 on successfully attached PM domain, or when it is found that the
* device doesn't need a PM domain, else a negative error code.
*/
-int dev_pm_domain_attach(struct device *dev, bool power_on)
+int dev_pm_domain_attach(struct device *dev, u32 flags)
{
int ret;
if (dev->pm_domain)
return 0;
- ret = acpi_dev_pm_attach(dev, power_on);
+ ret = acpi_dev_pm_attach(dev, !!(flags & PD_FLAG_ATTACH_POWER_ON));
if (!ret)
ret = genpd_dev_pm_attach(dev);
+ if (dev->pm_domain)
+ dev->power.detach_power_off = !!(flags & PD_FLAG_DETACH_POWER_OFF);
+
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
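With the boolean parameter replaced by flags, a caller that previously passed 'true' would now write something like the following (illustrative):

    /* Power on the device at attach; also power it off at detach. */
    ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
                                    PD_FLAG_DETACH_POWER_OFF);
    if (ret)
            return ret;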
@@ -160,7 +162,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
* For a detailed function description, see dev_pm_domain_attach_by_id().
*/
struct device *dev_pm_domain_attach_by_name(struct device *dev,
- char *name)
+ const char *name)
{
if (dev->pm_domain)
return ERR_PTR(-EEXIST);
@@ -170,14 +172,187 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
/**
+ * dev_pm_domain_attach_list - Associate a device with its PM domains.
+ * @dev: The device used to look up the PM domains for.
+ * @data: The data used for attaching to the PM domains.
+ * @list: An out-parameter with an allocated list of attached PM domains.
+ *
+ * This function helps to attach a device to its multiple PM domains. The
+ * caller, which is typically a driver's probe function, may provide a list of
+ * names for the PM domains that we should try to attach the device to, but it
+ * may also provide an empty list, in case the attach should be done for all of
+ * the available PM domains.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns the number of attached PM domains or a negative error code in case of
+ * a failure. Note that, to detach the list of PM domains, the driver shall call
+ * dev_pm_domain_detach_list(), typically during the remove phase.
+ */
+int dev_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ struct device_node *np = dev->of_node;
+ struct dev_pm_domain_list *pds;
+ struct device *pd_dev = NULL;
+ int ret, i, num_pds = 0;
+ bool by_id = true;
+ size_t size;
+ u32 pd_flags = data ? data->pd_flags : 0;
+ u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 :
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
+
+ if (dev->pm_domain)
+ return -EEXIST;
+
+ /* For now this is limited to OF based platforms. */
+ if (!np)
+ return 0;
+
+ if (data && data->pd_names) {
+ num_pds = data->num_pd_names;
+ by_id = false;
+ } else {
+ num_pds = of_count_phandle_with_args(np, "power-domains",
+ "#power-domain-cells");
+ }
+
+ if (num_pds <= 0)
+ return 0;
+
+ pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return -ENOMEM;
+
+ size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links) +
+ sizeof(*pds->opp_tokens);
+ pds->pd_devs = kcalloc(num_pds, size, GFP_KERNEL);
+ if (!pds->pd_devs) {
+ ret = -ENOMEM;
+ goto free_pds;
+ }
+ pds->pd_links = (void *)(pds->pd_devs + num_pds);
+ pds->opp_tokens = (void *)(pds->pd_links + num_pds);
+
+ if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON)
+ link_flags |= DL_FLAG_RPM_ACTIVE;
+
+ for (i = 0; i < num_pds; i++) {
+ if (by_id)
+ pd_dev = dev_pm_domain_attach_by_id(dev, i);
+ else
+ pd_dev = dev_pm_domain_attach_by_name(dev,
+ data->pd_names[i]);
+ if (IS_ERR_OR_NULL(pd_dev)) {
+ ret = pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
+ goto err_attach;
+ }
+
+ if (pd_flags & PD_FLAG_REQUIRED_OPP) {
+ struct dev_pm_opp_config config = {
+ .required_dev = pd_dev,
+ .required_dev_index = i,
+ };
+
+ ret = dev_pm_opp_set_config(dev, &config);
+ if (ret < 0)
+ goto err_link;
+
+ pds->opp_tokens[i] = ret;
+ }
+
+ if (link_flags) {
+ struct device_link *link;
+
+ link = device_link_add(dev, pd_dev, link_flags);
+ if (!link) {
+ ret = -ENODEV;
+ goto err_link;
+ }
+
+ pds->pd_links[i] = link;
+ }
+
+ pds->pd_devs[i] = pd_dev;
+ }
+
+ pds->num_pds = num_pds;
+ *list = pds;
+ return num_pds;
+
+err_link:
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
+ dev_pm_domain_detach(pd_dev, true);
+err_attach:
+ while (--i >= 0) {
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
+ if (pds->pd_links[i])
+ device_link_del(pds->pd_links[i]);
+ dev_pm_domain_detach(pds->pd_devs[i], true);
+ }
+ kfree(pds->pd_devs);
+free_pds:
+ kfree(pds);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
+
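A sketch of a consumer device with two PM domains; the domain names and the helper function are assumptions for illustration:

    static const char * const example_pd_names[] = { "perf", "mem" };
    static const struct dev_pm_domain_attach_data example_pd_data = {
            .pd_names = example_pd_names,
            .num_pd_names = ARRAY_SIZE(example_pd_names),
            .pd_flags = PD_FLAG_DEV_LINK_ON,
    };

    static int example_attach(struct device *dev,
                              struct dev_pm_domain_list **pd_list)
    {
            int num_pds;

            num_pds = dev_pm_domain_attach_list(dev, &example_pd_data, pd_list);
            if (num_pds < 0)
                    return num_pds;         /* attach failed */

            /* 0 means no PM domains to attach; > 0 is the attached count. */
            return 0;
    }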
+/**
+ * devm_pm_domain_detach_list - devres-enabled version of dev_pm_domain_detach_list.
+ * @_list: The list of PM domains to detach.
+ *
+ * This function reverses the actions from devm_pm_domain_attach_list().
+ * It will be invoked implicitly during the remove phase if the driver
+ * uses devm_pm_domain_attach_list() to attach the PM domains.
+ */
+static void devm_pm_domain_detach_list(void *_list)
+{
+ struct dev_pm_domain_list *list = _list;
+
+ dev_pm_domain_detach_list(list);
+}
+
+/**
+ * devm_pm_domain_attach_list - devres-enabled version of dev_pm_domain_attach_list
+ * @dev: The device used to look up the PM domains for.
+ * @data: The data used for attaching to the PM domains.
+ * @list: An out-parameter with an allocated list of attached PM domains.
+ *
+ * NOTE: this will also handle calling devm_pm_domain_detach_list() for
+ * you during the remove phase.
+ *
+ * Returns the number of attached PM domains or a negative error code in case of
+ * a failure.
+ */
+int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ int ret, num_pds;
+
+ num_pds = dev_pm_domain_attach_list(dev, data, list);
+ if (num_pds <= 0)
+ return num_pds;
+
+ ret = devm_add_action_or_reset(dev, devm_pm_domain_detach_list, *list);
+ if (ret)
+ return ret;
+
+ return num_pds;
+}
+EXPORT_SYMBOL_GPL(devm_pm_domain_attach_list);
+
+/**
* dev_pm_domain_detach - Detach a device from its PM domain.
* @dev: Device to detach.
* @power_off: Used to indicate whether we should power off the device.
*
- * This functions will reverse the actions from dev_pm_domain_attach() and
- * dev_pm_domain_attach_by_id(), thus it detaches @dev from its PM domain.
- * Typically it should be invoked during the remove phase, either from
- * subsystem level code or from drivers.
+ * This function reverses the actions from dev_pm_domain_attach(),
+ * dev_pm_domain_attach_by_id() and dev_pm_domain_attach_by_name(), thus it
+ * detaches @dev from its PM domain. Typically it should be invoked during the
+ * remove phase, either from subsystem level code or from drivers.
*
* Callers must ensure proper synchronization of this function with power
* management callbacks.
@@ -190,6 +365,55 @@ void dev_pm_domain_detach(struct device *dev, bool power_off)
EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
/**
+ * dev_pm_domain_detach_list - Detach a list of PM domains.
+ * @list: The list of PM domains to detach.
+ *
+ * This function reverses the actions from dev_pm_domain_attach_list().
+ * Typically it should be invoked during the remove phase from drivers.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void dev_pm_domain_detach_list(struct dev_pm_domain_list *list)
+{
+ int i;
+
+ if (!list)
+ return;
+
+ for (i = 0; i < list->num_pds; i++) {
+ dev_pm_opp_clear_config(list->opp_tokens[i]);
+ if (list->pd_links[i])
+ device_link_del(list->pd_links[i]);
+ dev_pm_domain_detach(list->pd_devs[i], true);
+ }
+
+ kfree(list->pd_devs);
+ kfree(list);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list);
+
+/**
+ * dev_pm_domain_start - Start the device through its PM domain.
+ * @dev: Device to start.
+ *
+ * This function should typically be called during probe by a subsystem/driver,
+ * when it needs to start its device from the PM domain's perspective. Note
+ * that it is assumed that the PM domain is already powered on when this
+ * function is called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_domain_start(struct device *dev)
+{
+ if (dev->pm_domain && dev->pm_domain->start)
+ return dev->pm_domain->start(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_start);
+
+/**
* dev_pm_domain_set - Set PM domain of a device.
* @dev: Device whose PM domain is to be set.
* @pd: PM domain to be set, or NULL.
@@ -210,3 +434,24 @@ void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
device_pm_check_callbacks(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_domain_set);
+
+/**
+ * dev_pm_domain_set_performance_state - Request a new performance state.
+ * @dev: The device to make the request for.
+ * @state: Target performance state for the device.
+ *
+ * This function should be called when a new performance state needs to be
+ * requested for a device that is attached to a PM domain. Note that the
+ * support for performance scaling for PM domains is optional.
+ *
+ * Returns 0 on success and when performance scaling isn't supported, negative
+ * error code on failure.
+ */
+int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state)
+{
+ if (dev->pm_domain && dev->pm_domain->set_performance_state)
+ return dev->pm_domain->set_performance_state(dev, state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_set_performance_state);
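Like dev_pm_domain_start() above, this is a thin pass-through to the PM domain; a consumer might call it as follows (the state value is a made-up example):

    /* Request performance state 3 from the device's PM domain, if any. */
    ret = dev_pm_domain_set_performance_state(dev, 3);
    if (ret)
            dev_err(dev, "failed to set performance state: %d\n", ret);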
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
deleted file mode 100644
index 500de1dee967..000000000000
--- a/drivers/base/power/domain.c
+++ /dev/null
@@ -1,2990 +0,0 @@
-/*
- * drivers/base/power/domain.c - Common code related to device power domains.
- *
- * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/pm_opp.h>
-#include <linux/pm_runtime.h>
-#include <linux/pm_domain.h>
-#include <linux/pm_qos.h>
-#include <linux/pm_clock.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/suspend.h>
-#include <linux/export.h>
-
-#include "power.h"
-
-#define GENPD_RETRY_MAX_MS 250 /* Approximate */
-
-#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
-({ \
- type (*__routine)(struct device *__d); \
- type __ret = (type)0; \
- \
- __routine = genpd->dev_ops.callback; \
- if (__routine) { \
- __ret = __routine(dev); \
- } \
- __ret; \
-})
-
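For readers unfamiliar with the statement-expression macro being removed here, GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands to roughly the following (illustrative expansion, not part of the file):

    int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
    int __ret = 0;

    if (__routine)
            __ret = __routine(dev);
    /* the ({ ... }) block evaluates to __ret */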
-static LIST_HEAD(gpd_list);
-static DEFINE_MUTEX(gpd_list_lock);
-
-struct genpd_lock_ops {
- void (*lock)(struct generic_pm_domain *genpd);
- void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
- int (*lock_interruptible)(struct generic_pm_domain *genpd);
- void (*unlock)(struct generic_pm_domain *genpd);
-};
-
-static void genpd_lock_mtx(struct generic_pm_domain *genpd)
-{
- mutex_lock(&genpd->mlock);
-}
-
-static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
- int depth)
-{
- mutex_lock_nested(&genpd->mlock, depth);
-}
-
-static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
-{
- return mutex_lock_interruptible(&genpd->mlock);
-}
-
-static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
-{
- return mutex_unlock(&genpd->mlock);
-}
-
-static const struct genpd_lock_ops genpd_mtx_ops = {
- .lock = genpd_lock_mtx,
- .lock_nested = genpd_lock_nested_mtx,
- .lock_interruptible = genpd_lock_interruptible_mtx,
- .unlock = genpd_unlock_mtx,
-};
-
-static void genpd_lock_spin(struct generic_pm_domain *genpd)
- __acquires(&genpd->slock)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&genpd->slock, flags);
- genpd->lock_flags = flags;
-}
-
-static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
- int depth)
- __acquires(&genpd->slock)
-{
- unsigned long flags;
-
- spin_lock_irqsave_nested(&genpd->slock, flags, depth);
- genpd->lock_flags = flags;
-}
-
-static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
- __acquires(&genpd->slock)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&genpd->slock, flags);
- genpd->lock_flags = flags;
- return 0;
-}
-
-static void genpd_unlock_spin(struct generic_pm_domain *genpd)
- __releases(&genpd->slock)
-{
- spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
-}
-
-static const struct genpd_lock_ops genpd_spin_ops = {
- .lock = genpd_lock_spin,
- .lock_nested = genpd_lock_nested_spin,
- .lock_interruptible = genpd_lock_interruptible_spin,
- .unlock = genpd_unlock_spin,
-};
-
-#define genpd_lock(p) p->lock_ops->lock(p)
-#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
-#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
-#define genpd_unlock(p) p->lock_ops->unlock(p)
-
-#define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
-#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
-#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
-#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
-
-static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
- const struct generic_pm_domain *genpd)
-{
- bool ret;
-
- ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
-
- /*
- * Warn once if an IRQ safe device is attached to a no sleep domain, as
- * to indicate a suboptimal configuration for PM. For an always on
- * domain this isn't the case, thus don't warn.
- */
- if (ret && !genpd_is_always_on(genpd))
- dev_warn_once(dev, "PM domain %s will not be powered off\n",
- genpd->name);
-
- return ret;
-}
-
-/*
- * Get the generic PM domain for a particular struct device.
- * This validates the struct device pointer, the PM domain pointer,
- * and checks that the PM domain pointer is a real generic PM domain.
- * Any failure results in NULL being returned.
- */
-static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
-{
- struct generic_pm_domain *genpd = NULL, *gpd;
-
- if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
- return NULL;
-
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (&gpd->domain == dev->pm_domain) {
- genpd = gpd;
- break;
- }
- }
- mutex_unlock(&gpd_list_lock);
-
- return genpd;
-}
-
-/*
- * This should only be used where we are certain that the pm_domain
- * attached to the device is a genpd domain.
- */
-static struct generic_pm_domain *dev_to_genpd(struct device *dev)
-{
- if (IS_ERR_OR_NULL(dev->pm_domain))
- return ERR_PTR(-EINVAL);
-
- return pd_to_genpd(dev->pm_domain);
-}
-
-static int genpd_stop_dev(const struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
-}
-
-static int genpd_start_dev(const struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, start, dev);
-}
-
-static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
-{
- bool ret = false;
-
- if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
- ret = !!atomic_dec_and_test(&genpd->sd_count);
-
- return ret;
-}
-
-static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
-{
- atomic_inc(&genpd->sd_count);
- smp_mb__after_atomic();
-}
-
-#ifdef CONFIG_DEBUG_FS
-static void genpd_update_accounting(struct generic_pm_domain *genpd)
-{
- ktime_t delta, now;
-
- now = ktime_get();
- delta = ktime_sub(now, genpd->accounting_time);
-
- /*
- * If genpd->status is active, the domain has just left the "off"
- * state, so the elapsed time was spent idle and is added to the
- * current state's idle time; otherwise it is added to on_time.
- */
- if (genpd->status == GPD_STATE_ACTIVE) {
- int state_idx = genpd->state_idx;
-
- genpd->states[state_idx].idle_time =
- ktime_add(genpd->states[state_idx].idle_time, delta);
- } else {
- genpd->on_time = ktime_add(genpd->on_time, delta);
- }
-
- genpd->accounting_time = now;
-}
-#else
-static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
-#endif
-
-static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
- unsigned int state)
-{
- struct generic_pm_domain_data *pd_data;
- struct pm_domain_data *pdd;
- struct gpd_link *link;
-
- /* New requested state is same as Max requested state */
- if (state == genpd->performance_state)
- return state;
-
- /* New requested state is higher than Max requested state */
- if (state > genpd->performance_state)
- return state;
-
- /* Traverse all devices within the domain */
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- pd_data = to_gpd_data(pdd);
-
- if (pd_data->performance_state > state)
- state = pd_data->performance_state;
- }
-
- /*
- * Traverse all sub-domains within the domain. This can be
- * done without any additional locking as the link->performance_state
- * field is protected by the master genpd->lock, which is already taken.
- *
- * Also note that link->performance_state (subdomain's performance state
- * requirement to master domain) is different from
- * link->slave->performance_state (current performance state requirement
- * of the devices/sub-domains of the subdomain) and so can have a
- * different value.
- *
- * Note that we also take vote from powered-off sub-domains into account
- * as the same is done for devices right now.
- */
- list_for_each_entry(link, &genpd->master_links, master_node) {
- if (link->performance_state > state)
- state = link->performance_state;
- }
-
- return state;
-}
-
-static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
- unsigned int state, int depth)
-{
- struct generic_pm_domain *master;
- struct gpd_link *link;
- int master_state, ret;
-
- if (state == genpd->performance_state)
- return 0;
-
- /* Propagate to masters of genpd */
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- master = link->master;
-
- if (!master->set_performance_state)
- continue;
-
- /* Find master's performance state */
- ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
- master->opp_table,
- state);
- if (unlikely(ret < 0))
- goto err;
-
- master_state = ret;
-
- genpd_lock_nested(master, depth + 1);
-
- link->prev_performance_state = link->performance_state;
- link->performance_state = master_state;
- master_state = _genpd_reeval_performance_state(master,
- master_state);
- ret = _genpd_set_performance_state(master, master_state, depth + 1);
- if (ret)
- link->performance_state = link->prev_performance_state;
-
- genpd_unlock(master);
-
- if (ret)
- goto err;
- }
-
- ret = genpd->set_performance_state(genpd, state);
- if (ret)
- goto err;
-
- genpd->performance_state = state;
- return 0;
-
-err:
- /* Encountered an error, lets rollback */
- list_for_each_entry_continue_reverse(link, &genpd->slave_links,
- slave_node) {
- master = link->master;
-
- if (!master->set_performance_state)
- continue;
-
- genpd_lock_nested(master, depth + 1);
-
- master_state = link->prev_performance_state;
- link->performance_state = master_state;
-
- master_state = _genpd_reeval_performance_state(master,
- master_state);
- if (_genpd_set_performance_state(master, master_state, depth + 1)) {
- pr_err("%s: Failed to roll back to %d performance state\n",
- master->name, master_state);
- }
-
- genpd_unlock(master);
- }
-
- return ret;
-}
-
-/**
- * dev_pm_genpd_set_performance_state - Set performance state of device's power
- * domain.
- *
- * @dev: Device for which the performance-state needs to be set.
- * @state: Target performance state of the device. This can be set to 0 when
- * the device no longer has any performance-state constraints (and so it
- * no longer takes part in determining the genpd's target performance
- * state).
- *
- * It is assumed that the users guarantee that the genpd wouldn't be detached
- * while this routine is getting called.
- *
- * Returns 0 on success and negative error values on failures.
- */
-int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
-{
- struct generic_pm_domain *genpd;
- struct generic_pm_domain_data *gpd_data;
- unsigned int prev;
- int ret;
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -ENODEV;
-
- if (unlikely(!genpd->set_performance_state))
- return -EINVAL;
-
- if (unlikely(!dev->power.subsys_data ||
- !dev->power.subsys_data->domain_data)) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- genpd_lock(genpd);
-
- gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- prev = gpd_data->performance_state;
- gpd_data->performance_state = state;
-
- state = _genpd_reeval_performance_state(genpd, state);
- ret = _genpd_set_performance_state(genpd, state, 0);
- if (ret)
- gpd_data->performance_state = prev;
-
- genpd_unlock(genpd);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
-
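For context, a typical consumer of the API removed here votes for a state while busy and drops the vote when idle (illustrative only):

    /* Vote for performance state 2 while the device is busy. */
    ret = dev_pm_genpd_set_performance_state(dev, 2);
    if (ret)
            return ret;

    /* When idle, drop the constraint by voting 0. */
    dev_pm_genpd_set_performance_state(dev, 0);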
-static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
-{
- unsigned int state_idx = genpd->state_idx;
- ktime_t time_start;
- s64 elapsed_ns;
- int ret;
-
- if (!genpd->power_on)
- return 0;
-
- if (!timed)
- return genpd->power_on(genpd);
-
- time_start = ktime_get();
- ret = genpd->power_on(genpd);
- if (ret)
- return ret;
-
- elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
- return ret;
-
- genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
- genpd->max_off_time_changed = true;
- pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
- genpd->name, "on", elapsed_ns);
-
- return ret;
-}
-
-static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
-{
- unsigned int state_idx = genpd->state_idx;
- ktime_t time_start;
- s64 elapsed_ns;
- int ret;
-
- if (!genpd->power_off)
- return 0;
-
- if (!timed)
- return genpd->power_off(genpd);
-
- time_start = ktime_get();
- ret = genpd->power_off(genpd);
- if (ret == -EBUSY)
- return ret;
-
- elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
- return ret;
-
- genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
- genpd->max_off_time_changed = true;
- pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
- genpd->name, "off", elapsed_ns);
-
- return ret;
-}
-
-/**
- * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
- * @genpd: PM domain to power off.
- *
- * Queue up the execution of genpd_power_off() unless it's already been done
- * before.
- */
-static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
-{
- queue_work(pm_wq, &genpd->power_off_work);
-}
-
-/**
- * genpd_power_off - Remove power from a given PM domain.
- * @genpd: PM domain to power down.
- * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
- * RPM status of the related device is in an intermediate state, not yet turned
- * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
- * be RPM_SUSPENDED, while it tries to power off the PM domain.
- *
- * If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, remove power from @genpd.
- */
-static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
- unsigned int depth)
-{
- struct pm_domain_data *pdd;
- struct gpd_link *link;
- unsigned int not_suspended = 0;
-
- /*
- * Do not try to power off the domain in the following situations:
- * (1) The domain is already in the "power off" state.
- * (2) System suspend is in progress.
- */
- if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
- return 0;
-
- /*
- * Abort power off for the PM domain in the following situations:
- * (1) The domain is configured as always on.
- * (2) When the domain has a subdomain being powered on.
- */
- if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
- return -EBUSY;
-
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- enum pm_qos_flags_status stat;
-
- stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
- if (stat > PM_QOS_FLAGS_NONE)
- return -EBUSY;
-
- /*
- * Do not allow PM domain to be powered off, when an IRQ safe
- * device is part of a non-IRQ safe domain.
- */
- if (!pm_runtime_suspended(pdd->dev) ||
- irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
- not_suspended++;
- }
-
- if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
- return -EBUSY;
-
- if (genpd->gov && genpd->gov->power_down_ok) {
- if (!genpd->gov->power_down_ok(&genpd->domain))
- return -EAGAIN;
- }
-
- /* Default to shallowest state. */
- if (!genpd->gov)
- genpd->state_idx = 0;
-
- if (genpd->power_off) {
- int ret;
-
- if (atomic_read(&genpd->sd_count) > 0)
- return -EBUSY;
-
- /*
- * If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call genpd_power_on() for the master yet after
- * incrementing it. In that case genpd_power_on() will wait
- * for us to drop the lock, so we can call .power_off() and let
- * the genpd_power_on() restore power for us (this shouldn't
- * happen very often).
- */
- ret = _genpd_power_off(genpd, true);
- if (ret)
- return ret;
- }
-
- genpd->status = GPD_STATE_POWER_OFF;
- genpd_update_accounting(genpd);
-
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_lock_nested(link->master, depth + 1);
- genpd_power_off(link->master, false, depth + 1);
- genpd_unlock(link->master);
- }
-
- return 0;
-}
-
-/**
- * genpd_power_on - Restore power to a given PM domain and its masters.
- * @genpd: PM domain to power up.
- * @depth: nesting count for lockdep.
- *
- * Restore power to @genpd and all of its masters so that it is possible to
- * resume a device belonging to it.
- */
-static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
-{
- struct gpd_link *link;
- int ret = 0;
-
- if (genpd_status_on(genpd))
- return 0;
-
- /*
- * The list is guaranteed not to change while the loop below is being
- * executed, unless one of the masters' .power_on() callbacks fiddles
- * with it.
- */
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- struct generic_pm_domain *master = link->master;
-
- genpd_sd_counter_inc(master);
-
- genpd_lock_nested(master, depth + 1);
- ret = genpd_power_on(master, depth + 1);
- genpd_unlock(master);
-
- if (ret) {
- genpd_sd_counter_dec(master);
- goto err;
- }
- }
-
- ret = _genpd_power_on(genpd, true);
- if (ret)
- goto err;
-
- genpd->status = GPD_STATE_ACTIVE;
- genpd_update_accounting(genpd);
-
- return 0;
-
- err:
- list_for_each_entry_continue_reverse(link,
- &genpd->slave_links,
- slave_node) {
- genpd_sd_counter_dec(link->master);
- genpd_lock_nested(link->master, depth + 1);
- genpd_power_off(link->master, false, depth + 1);
- genpd_unlock(link->master);
- }
-
- return ret;
-}
-
-static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
- unsigned long val, void *ptr)
-{
- struct generic_pm_domain_data *gpd_data;
- struct device *dev;
-
- gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
- dev = gpd_data->base.dev;
-
- for (;;) {
- struct generic_pm_domain *genpd;
- struct pm_domain_data *pdd;
-
- spin_lock_irq(&dev->power.lock);
-
- pdd = dev->power.subsys_data ?
- dev->power.subsys_data->domain_data : NULL;
- if (pdd) {
- to_gpd_data(pdd)->td.constraint_changed = true;
- genpd = dev_to_genpd(dev);
- } else {
- genpd = ERR_PTR(-ENODATA);
- }
-
- spin_unlock_irq(&dev->power.lock);
-
- if (!IS_ERR(genpd)) {
- genpd_lock(genpd);
- genpd->max_off_time_changed = true;
- genpd_unlock(genpd);
- }
-
- dev = dev->parent;
- if (!dev || dev->power.ignore_children)
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-/**
- * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
- * @work: Work structure used for scheduling the execution of this function.
- */
-static void genpd_power_off_work_fn(struct work_struct *work)
-{
- struct generic_pm_domain *genpd;
-
- genpd = container_of(work, struct generic_pm_domain, power_off_work);
-
- genpd_lock(genpd);
- genpd_power_off(genpd, false, 0);
- genpd_unlock(genpd);
-}
-
-/**
- * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
- * @dev: Device to handle.
- */
-static int __genpd_runtime_suspend(struct device *dev)
-{
- int (*cb)(struct device *__dev);
-
- if (dev->type && dev->type->pm)
- cb = dev->type->pm->runtime_suspend;
- else if (dev->class && dev->class->pm)
- cb = dev->class->pm->runtime_suspend;
- else if (dev->bus && dev->bus->pm)
- cb = dev->bus->pm->runtime_suspend;
- else
- cb = NULL;
-
- if (!cb && dev->driver && dev->driver->pm)
- cb = dev->driver->pm->runtime_suspend;
-
- return cb ? cb(dev) : 0;
-}
-
-/**
- * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
- * @dev: Device to handle.
- */
-static int __genpd_runtime_resume(struct device *dev)
-{
- int (*cb)(struct device *__dev);
-
- if (dev->type && dev->type->pm)
- cb = dev->type->pm->runtime_resume;
- else if (dev->class && dev->class->pm)
- cb = dev->class->pm->runtime_resume;
- else if (dev->bus && dev->bus->pm)
- cb = dev->bus->pm->runtime_resume;
- else
- cb = NULL;
-
- if (!cb && dev->driver && dev->driver->pm)
- cb = dev->driver->pm->runtime_resume;
-
- return cb ? cb(dev) : 0;
-}
-
-/**
- * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a runtime suspend of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int genpd_runtime_suspend(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- bool (*suspend_ok)(struct device *__dev);
- struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
- bool runtime_pm = pm_runtime_enabled(dev);
- ktime_t time_start;
- s64 elapsed_ns;
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- /*
- * A runtime PM centric subsystem/driver may re-use the runtime PM
- * callbacks for other purposes than runtime PM. In those scenarios
- * runtime PM is disabled. Under these circumstances, we shall skip
- * validating/measuring the PM QoS latency.
- */
- suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
- if (runtime_pm && suspend_ok && !suspend_ok(dev))
- return -EBUSY;
-
- /* Measure suspend latency. */
- time_start = 0;
- if (runtime_pm)
- time_start = ktime_get();
-
- ret = __genpd_runtime_suspend(dev);
- if (ret)
- return ret;
-
- ret = genpd_stop_dev(genpd, dev);
- if (ret) {
- __genpd_runtime_resume(dev);
- return ret;
- }
-
- /* Update suspend latency value if the measured time exceeds it. */
- if (runtime_pm) {
- elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns > td->suspend_latency_ns) {
- td->suspend_latency_ns = elapsed_ns;
- dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
- elapsed_ns);
- genpd->max_off_time_changed = true;
- td->constraint_changed = true;
- }
- }
-
- /*
- * If power.irq_safe is set, this routine may be run with
- * IRQs disabled, so suspend only if the PM domain also is irq_safe.
- */
- if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
- return 0;
-
- genpd_lock(genpd);
- genpd_power_off(genpd, true, 0);
- genpd_unlock(genpd);
-
- return 0;
-}
-
-/**
- * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
- * @dev: Device to resume.
- *
- * Carry out a runtime resume of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int genpd_runtime_resume(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
- bool runtime_pm = pm_runtime_enabled(dev);
- ktime_t time_start;
- s64 elapsed_ns;
- int ret;
- bool timed = true;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- /*
- * As we don't power off a non IRQ safe domain, which holds
- * an IRQ safe device, we don't need to restore power to it.
- */
- if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
- timed = false;
- goto out;
- }
-
- genpd_lock(genpd);
- ret = genpd_power_on(genpd, 0);
- genpd_unlock(genpd);
-
- if (ret)
- return ret;
-
- out:
- /* Measure resume latency. */
- time_start = 0;
- if (timed && runtime_pm)
- time_start = ktime_get();
-
- ret = genpd_start_dev(genpd, dev);
- if (ret)
- goto err_poweroff;
-
- ret = __genpd_runtime_resume(dev);
- if (ret)
- goto err_stop;
-
- /* Update resume latency value if the measured time exceeds it. */
- if (timed && runtime_pm) {
- elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns > td->resume_latency_ns) {
- td->resume_latency_ns = elapsed_ns;
- dev_dbg(dev, "resume latency exceeded, %lld ns\n",
- elapsed_ns);
- genpd->max_off_time_changed = true;
- td->constraint_changed = true;
- }
- }
-
- return 0;
-
-err_stop:
- genpd_stop_dev(genpd, dev);
-err_poweroff:
- if (!pm_runtime_is_irq_safe(dev) ||
- (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
- genpd_lock(genpd);
- genpd_power_off(genpd, true, 0);
- genpd_unlock(genpd);
- }
-
- return ret;
-}
-
-static bool pd_ignore_unused;
-static int __init pd_ignore_unused_setup(char *__unused)
-{
- pd_ignore_unused = true;
- return 1;
-}
-__setup("pd_ignore_unused", pd_ignore_unused_setup);
-
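The __setup() hook above wires this to the kernel command line: booting with pd_ignore_unused keeps otherwise-unused domains powered, by making genpd_power_off_unused() below skip queueing the power-off work (useful when debugging power sequencing).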
-/**
- * genpd_power_off_unused - Power off all PM domains with no devices in use.
- */
-static int __init genpd_power_off_unused(void)
-{
- struct generic_pm_domain *genpd;
-
- if (pd_ignore_unused) {
- pr_warn("genpd: Not disabling unused power domains\n");
- return 0;
- }
-
- mutex_lock(&gpd_list_lock);
-
- list_for_each_entry(genpd, &gpd_list, gpd_list_node)
- genpd_queue_power_off_work(genpd);
-
- mutex_unlock(&gpd_list_lock);
-
- return 0;
-}
-late_initcall(genpd_power_off_unused);
-
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
-
-static bool genpd_present(const struct generic_pm_domain *genpd)
-{
- const struct generic_pm_domain *gpd;
-
- if (IS_ERR_OR_NULL(genpd))
- return false;
-
- list_for_each_entry(gpd, &gpd_list, gpd_list_node)
- if (gpd == genpd)
- return true;
-
- return false;
-}
-
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-
-/**
- * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
- * @genpd: PM domain to power off, if possible.
- * @use_lock: use the lock.
- * @depth: nesting count for lockdep.
- *
- * Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so. Also, in that case propagate to its masters.
- *
- * This function is only called in "noirq" and "syscore" stages of system power
- * transitions. The "noirq" callbacks may be executed asynchronously, thus in
- * these cases the lock must be held.
- */
-static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
- unsigned int depth)
-{
- struct gpd_link *link;
-
- if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
- return;
-
- if (genpd->suspended_count != genpd->device_count
- || atomic_read(&genpd->sd_count) > 0)
- return;
-
- /* Choose the deepest state when suspending */
- genpd->state_idx = genpd->state_count - 1;
- if (_genpd_power_off(genpd, false))
- return;
-
- genpd->status = GPD_STATE_POWER_OFF;
-
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_dec(link->master);
-
- if (use_lock)
- genpd_lock_nested(link->master, depth + 1);
-
- genpd_sync_power_off(link->master, use_lock, depth + 1);
-
- if (use_lock)
- genpd_unlock(link->master);
- }
-}
-
-/**
- * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
- * @genpd: PM domain to power on.
- * @use_lock: use the lock.
- * @depth: nesting count for lockdep.
- *
- * This function is only called in "noirq" and "syscore" stages of system power
- * transitions. The "noirq" callbacks may be executed asynchronously, thus in
- * these cases the lock must be held.
- */
-static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
- unsigned int depth)
-{
- struct gpd_link *link;
-
- if (genpd_status_on(genpd))
- return;
-
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
- genpd_sd_counter_inc(link->master);
-
- if (use_lock)
- genpd_lock_nested(link->master, depth + 1);
-
- genpd_sync_power_on(link->master, use_lock, depth + 1);
-
- if (use_lock)
- genpd_unlock(link->master);
- }
-
- _genpd_power_on(genpd, false);
-
- genpd->status = GPD_STATE_ACTIVE;
-}
-
-/**
- * resume_needed - Check whether to resume a device before system suspend.
- * @dev: Device to check.
- * @genpd: PM domain the device belongs to.
- *
- * There are two cases in which a device that can wake up the system from sleep
- * states should be resumed by genpd_prepare(): (1) if the device is enabled
- * to wake up the system and it has to remain active for this purpose while the
- * system is in the sleep state and (2) if the device is not enabled to wake up
- * the system from sleep states and it generally doesn't generate wakeup signals
- * by itself (those signals are generated on its behalf by other parts of the
- * system). In the latter case it may be necessary to reconfigure the device's
- * wakeup settings during system suspend, because it may have been set up to
- * signal remote wakeup from the system's working state as needed by runtime PM.
- * Return 'true' in either of the above cases.
- */
-static bool resume_needed(struct device *dev,
- const struct generic_pm_domain *genpd)
-{
- bool active_wakeup;
-
- if (!device_can_wakeup(dev))
- return false;
-
- active_wakeup = genpd_is_active_wakeup(genpd);
- return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
-}
-
-/**
- * genpd_prepare - Start power transition of a device in a PM domain.
- * @dev: Device to start the transition of.
- *
- * Start a power transition of a device (during a system-wide power transition)
- * under the assumption that its pm_domain field points to the domain member of
- * an object of type struct generic_pm_domain representing a PM domain
- * consisting of I/O devices.
- */
-static int genpd_prepare(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- /*
- * If a wakeup request is pending for the device, it should be woken up
- * at this point and a system wakeup event should be reported if it's
- * set up to wake up the system from sleep states.
- */
- if (resume_needed(dev, genpd))
- pm_runtime_resume(dev);
-
- genpd_lock(genpd);
-
- if (genpd->prepared_count++ == 0)
- genpd->suspended_count = 0;
-
- genpd_unlock(genpd);
-
- ret = pm_generic_prepare(dev);
- if (ret < 0) {
- genpd_lock(genpd);
-
- genpd->prepared_count--;
-
- genpd_unlock(genpd);
- }
-
- /* Never return 1, as genpd doesn't cope with the direct_complete path. */
- return ret >= 0 ? 0 : ret;
-}
-
-/**
- * genpd_finish_suspend - Completion of suspend or hibernation of device in an
- * I/O pm domain.
- * @dev: Device to suspend.
- * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
- *
- * Stop the device and remove power from the domain if all devices in it have
- * been stopped.
- */
-static int genpd_finish_suspend(struct device *dev, bool poweroff)
-{
- struct generic_pm_domain *genpd;
- int ret = 0;
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- if (poweroff)
- ret = pm_generic_poweroff_noirq(dev);
- else
- ret = pm_generic_suspend_noirq(dev);
- if (ret)
- return ret;
-
- if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
-
- if (genpd->dev_ops.stop && genpd->dev_ops.start &&
- !pm_runtime_status_suspended(dev)) {
- ret = genpd_stop_dev(genpd, dev);
- if (ret) {
- if (poweroff)
- pm_generic_restore_noirq(dev);
- else
- pm_generic_resume_noirq(dev);
- return ret;
- }
- }
-
- genpd_lock(genpd);
- genpd->suspended_count++;
- genpd_sync_power_off(genpd, true, 0);
- genpd_unlock(genpd);
-
- return 0;
-}
-
-/**
- * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Stop the device and remove power from the domain if all devices in it have
- * been stopped.
- */
-static int genpd_suspend_noirq(struct device *dev)
-{
- dev_dbg(dev, "%s()\n", __func__);
-
- return genpd_finish_suspend(dev, false);
-}
-
-/**
- * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Restore power to the device's PM domain, if necessary, and start the device.
- */
-static int genpd_resume_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return pm_generic_resume_noirq(dev);
-
- genpd_lock(genpd);
- genpd_sync_power_on(genpd, true, 0);
- genpd->suspended_count--;
- genpd_unlock(genpd);
-
- if (genpd->dev_ops.stop && genpd->dev_ops.start &&
- !pm_runtime_status_suspended(dev)) {
- ret = genpd_start_dev(genpd, dev);
- if (ret)
- return ret;
- }
-
- return pm_generic_resume_noirq(dev);
-}
-
-/**
- * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
- * @dev: Device to freeze.
- *
- * Carry out a late freeze of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int genpd_freeze_noirq(struct device *dev)
-{
- const struct generic_pm_domain *genpd;
- int ret = 0;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- ret = pm_generic_freeze_noirq(dev);
- if (ret)
- return ret;
-
- if (genpd->dev_ops.stop && genpd->dev_ops.start &&
- !pm_runtime_status_suspended(dev))
- ret = genpd_stop_dev(genpd, dev);
-
- return ret;
-}
-
-/**
- * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
- * @dev: Device to thaw.
- *
- * Start the device, unless power has been removed from the domain already
- * before the system transition.
- */
-static int genpd_thaw_noirq(struct device *dev)
-{
- const struct generic_pm_domain *genpd;
- int ret = 0;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- if (genpd->dev_ops.stop && genpd->dev_ops.start &&
- !pm_runtime_status_suspended(dev)) {
- ret = genpd_start_dev(genpd, dev);
- if (ret)
- return ret;
- }
-
- return pm_generic_thaw_noirq(dev);
-}
-
-/**
- * genpd_poweroff_noirq - Completion of hibernation of device in an
- * I/O PM domain.
- * @dev: Device to poweroff.
- *
- * Stop the device and remove power from the domain if all devices in it have
- * been stopped.
- */
-static int genpd_poweroff_noirq(struct device *dev)
-{
- dev_dbg(dev, "%s()\n", __func__);
-
- return genpd_finish_suspend(dev, true);
-}
-
-/**
- * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Make sure the domain will be in the same power state as before the
- * hibernation the system is resuming from and start the device if necessary.
- */
-static int genpd_restore_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- int ret = 0;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- /*
- * At this point suspended_count == 0 means we are being run for the
- * first time for the given domain in the present cycle.
- */
- genpd_lock(genpd);
- if (genpd->suspended_count++ == 0)
- /*
- * The boot kernel might put the domain into arbitrary state,
- * so make it appear as powered off to genpd_sync_power_on(),
- * so that it tries to power it on in case it was really off.
- */
- genpd->status = GPD_STATE_POWER_OFF;
-
- genpd_sync_power_on(genpd, true, 0);
- genpd_unlock(genpd);
-
- if (genpd->dev_ops.stop && genpd->dev_ops.start &&
- !pm_runtime_status_suspended(dev)) {
- ret = genpd_start_dev(genpd, dev);
- if (ret)
- return ret;
- }
-
- return pm_generic_restore_noirq(dev);
-}
-
-/**
- * genpd_complete - Complete power transition of a device in a power domain.
- * @dev: Device to complete the transition of.
- *
- * Complete a power transition of a device (during a system-wide power
- * transition) under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static void genpd_complete(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return;
-
- pm_generic_complete(dev);
-
- genpd_lock(genpd);
-
- genpd->prepared_count--;
- if (!genpd->prepared_count)
- genpd_queue_power_off_work(genpd);
-
- genpd_unlock(genpd);
-}
-
-/**
- * genpd_syscore_switch - Switch power during system core suspend or resume.
- * @dev: Device that normally is marked as "always on" to switch power for.
- * @suspend: Whether to power the domain off (true) or back on (false).
- *
- * This routine may only be called during the system core (syscore) suspend or
- * resume phase for devices whose "always on" flags are set.
- */
-static void genpd_syscore_switch(struct device *dev, bool suspend)
-{
- struct generic_pm_domain *genpd;
-
- genpd = dev_to_genpd(dev);
- if (!genpd_present(genpd))
- return;
-
- if (suspend) {
- genpd->suspended_count++;
- genpd_sync_power_off(genpd, false, 0);
- } else {
- genpd_sync_power_on(genpd, false, 0);
- genpd->suspended_count--;
- }
-}
-
-void pm_genpd_syscore_poweroff(struct device *dev)
-{
- genpd_syscore_switch(dev, true);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
-
-void pm_genpd_syscore_poweron(struct device *dev)
-{
- genpd_syscore_switch(dev, false);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define genpd_prepare NULL
-#define genpd_suspend_noirq NULL
-#define genpd_resume_noirq NULL
-#define genpd_freeze_noirq NULL
-#define genpd_thaw_noirq NULL
-#define genpd_poweroff_noirq NULL
-#define genpd_restore_noirq NULL
-#define genpd_complete NULL
-
-#endif /* CONFIG_PM_SLEEP */
-
-static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
- struct gpd_timing_data *td)
-{
- struct generic_pm_domain_data *gpd_data;
- int ret;
-
- ret = dev_pm_get_subsys_data(dev);
- if (ret)
- return ERR_PTR(ret);
-
- gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
- if (!gpd_data) {
- ret = -ENOMEM;
- goto err_put;
- }
-
- if (td)
- gpd_data->td = *td;
-
- gpd_data->base.dev = dev;
- gpd_data->td.constraint_changed = true;
- gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
- gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
-
- spin_lock_irq(&dev->power.lock);
-
- if (dev->power.subsys_data->domain_data) {
- ret = -EINVAL;
- goto err_free;
- }
-
- dev->power.subsys_data->domain_data = &gpd_data->base;
-
- spin_unlock_irq(&dev->power.lock);
-
- return gpd_data;
-
- err_free:
- spin_unlock_irq(&dev->power.lock);
- kfree(gpd_data);
- err_put:
- dev_pm_put_subsys_data(dev);
- return ERR_PTR(ret);
-}
-
-static void genpd_free_dev_data(struct device *dev,
- struct generic_pm_domain_data *gpd_data)
-{
- spin_lock_irq(&dev->power.lock);
-
- dev->power.subsys_data->domain_data = NULL;
-
- spin_unlock_irq(&dev->power.lock);
-
- kfree(gpd_data);
- dev_pm_put_subsys_data(dev);
-}
-
-static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
- struct gpd_timing_data *td)
-{
- struct generic_pm_domain_data *gpd_data;
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
- return -EINVAL;
-
- gpd_data = genpd_alloc_dev_data(dev, td);
- if (IS_ERR(gpd_data))
- return PTR_ERR(gpd_data);
-
- genpd_lock(genpd);
-
- ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
- if (ret)
- goto out;
-
- dev_pm_domain_set(dev, &genpd->domain);
-
- genpd->device_count++;
- genpd->max_off_time_changed = true;
-
- list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
-
- out:
- genpd_unlock(genpd);
-
- if (ret)
- genpd_free_dev_data(dev, gpd_data);
- else
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
-
- return ret;
-}
-
-/**
- * pm_genpd_add_device - Add a device to an I/O PM domain.
- * @genpd: PM domain to add the device to.
- * @dev: Device to be added.
- */
-int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
-{
- int ret;
-
- mutex_lock(&gpd_list_lock);
- ret = genpd_add_device(genpd, dev, NULL);
- mutex_unlock(&gpd_list_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pm_genpd_add_device);
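[A minimal usage sketch; my_pd and my_probe are illustrative assumptions, not names from this file. The domain is assumed to have been initialized elsewhere with pm_genpd_init().]

#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

extern struct generic_pm_domain my_pd;	/* hypothetical, initialized elsewhere */

static int my_probe(struct platform_device *pdev)
{
	/* Add the device to the domain before enabling runtime PM. */
	int ret = pm_genpd_add_device(&my_pd, &pdev->dev);

	if (ret)
		return ret;

	pm_runtime_enable(&pdev->dev);
	return 0;
}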
-
-static int genpd_remove_device(struct generic_pm_domain *genpd,
- struct device *dev)
-{
- struct generic_pm_domain_data *gpd_data;
- struct pm_domain_data *pdd;
- int ret = 0;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- pdd = dev->power.subsys_data->domain_data;
- gpd_data = to_gpd_data(pdd);
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
-
- genpd_lock(genpd);
-
- if (genpd->prepared_count > 0) {
- ret = -EAGAIN;
- goto out;
- }
-
- genpd->device_count--;
- genpd->max_off_time_changed = true;
-
- if (genpd->detach_dev)
- genpd->detach_dev(genpd, dev);
-
- dev_pm_domain_set(dev, NULL);
-
- list_del_init(&pdd->list_node);
-
- genpd_unlock(genpd);
-
- genpd_free_dev_data(dev, gpd_data);
-
- return 0;
-
- out:
- genpd_unlock(genpd);
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
-
- return ret;
-}
-
-/**
- * pm_genpd_remove_device - Remove a device from an I/O PM domain.
- * @dev: Device to be removed.
- */
-int pm_genpd_remove_device(struct device *dev)
-{
- struct generic_pm_domain *genpd = genpd_lookup_dev(dev);
-
- if (!genpd)
- return -EINVAL;
-
- return genpd_remove_device(genpd, dev);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
-
-static int genpd_add_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *subdomain)
-{
- struct gpd_link *link, *itr;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
- || genpd == subdomain)
- return -EINVAL;
-
-	/*
-	 * If the subdomain can be powered on/off in an IRQ safe
-	 * context, its parent must support that too, since the
-	 * parent may need to be powered on/off from that same
-	 * context.
-	 */
- if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
- WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
- genpd->name, subdomain->name);
- return -EINVAL;
- }
-
- link = kzalloc(sizeof(*link), GFP_KERNEL);
- if (!link)
- return -ENOMEM;
-
- genpd_lock(subdomain);
- genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
-
- if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
- ret = -EINVAL;
- goto out;
- }
-
- list_for_each_entry(itr, &genpd->master_links, master_node) {
- if (itr->slave == subdomain && itr->master == genpd) {
- ret = -EINVAL;
- goto out;
- }
- }
-
- link->master = genpd;
- list_add_tail(&link->master_node, &genpd->master_links);
- link->slave = subdomain;
- list_add_tail(&link->slave_node, &subdomain->slave_links);
- if (genpd_status_on(subdomain))
- genpd_sd_counter_inc(genpd);
-
- out:
- genpd_unlock(genpd);
- genpd_unlock(subdomain);
- if (ret)
- kfree(link);
- return ret;
-}
-
-/**
- * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
- * @genpd: Master PM domain to add the subdomain to.
- * @subdomain: Subdomain to be added.
- */
-int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *subdomain)
-{
- int ret;
-
- mutex_lock(&gpd_list_lock);
- ret = genpd_add_subdomain(genpd, subdomain);
- mutex_unlock(&gpd_list_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
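[A sketch of linking two previously initialized domains; parent_pd and child_pd are hypothetical.]

static int my_link_domains(struct generic_pm_domain *parent_pd,
			   struct generic_pm_domain *child_pd)
{
	/* Both domains must already have been set up with pm_genpd_init(). */
	return pm_genpd_add_subdomain(parent_pd, child_pd);
}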
-
-/**
- * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
- * @genpd: Master PM domain to remove the subdomain from.
- * @subdomain: Subdomain to be removed.
- */
-int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *subdomain)
-{
- struct gpd_link *l, *link;
- int ret = -EINVAL;
-
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
- return -EINVAL;
-
- genpd_lock(subdomain);
- genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
-
- if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
- pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
- subdomain->name);
- ret = -EBUSY;
- goto out;
- }
-
- list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
- if (link->slave != subdomain)
- continue;
-
- list_del(&link->master_node);
- list_del(&link->slave_node);
- kfree(link);
- if (genpd_status_on(subdomain))
- genpd_sd_counter_dec(genpd);
-
- ret = 0;
- break;
- }
-
-out:
- genpd_unlock(genpd);
- genpd_unlock(subdomain);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
-
-static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
-{
- struct genpd_power_state *state;
-
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
-
- genpd->states = state;
- genpd->state_count = 1;
- genpd->free = state;
-
- return 0;
-}
-
-static void genpd_lock_init(struct generic_pm_domain *genpd)
-{
- if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
- spin_lock_init(&genpd->slock);
- genpd->lock_ops = &genpd_spin_ops;
- } else {
- mutex_init(&genpd->mlock);
- genpd->lock_ops = &genpd_mtx_ops;
- }
-}
-
-/**
- * pm_genpd_init - Initialize a generic I/O PM domain object.
- * @genpd: PM domain object to initialize.
- * @gov: PM domain governor to associate with the domain (may be NULL).
- * @is_off: Initial power state of the domain (true if the domain is off).
- *
- * Returns 0 on successful initialization, else a negative error code.
- */
-int pm_genpd_init(struct generic_pm_domain *genpd,
- struct dev_power_governor *gov, bool is_off)
-{
- int ret;
-
- if (IS_ERR_OR_NULL(genpd))
- return -EINVAL;
-
- INIT_LIST_HEAD(&genpd->master_links);
- INIT_LIST_HEAD(&genpd->slave_links);
- INIT_LIST_HEAD(&genpd->dev_list);
- genpd_lock_init(genpd);
- genpd->gov = gov;
- INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
- atomic_set(&genpd->sd_count, 0);
- genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
- genpd->device_count = 0;
- genpd->max_off_time_ns = -1;
- genpd->max_off_time_changed = true;
- genpd->provider = NULL;
- genpd->has_provider = false;
- genpd->accounting_time = ktime_get();
- genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
- genpd->domain.ops.runtime_resume = genpd_runtime_resume;
- genpd->domain.ops.prepare = genpd_prepare;
- genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
- genpd->domain.ops.resume_noirq = genpd_resume_noirq;
- genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
- genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
- genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
- genpd->domain.ops.restore_noirq = genpd_restore_noirq;
- genpd->domain.ops.complete = genpd_complete;
-
- if (genpd->flags & GENPD_FLAG_PM_CLK) {
- genpd->dev_ops.stop = pm_clk_suspend;
- genpd->dev_ops.start = pm_clk_resume;
- }
-
- /* Always-on domains must be powered on at initialization. */
- if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
- return -EINVAL;
-
- /* Use only one "off" state if there were no states declared */
- if (genpd->state_count == 0) {
- ret = genpd_set_default_power_state(genpd);
- if (ret)
- return ret;
- } else if (!gov) {
-		pr_warn("%s: no governor for states\n", genpd->name);
- }
-
- device_initialize(&genpd->dev);
- dev_set_name(&genpd->dev, "%s", genpd->name);
-
- mutex_lock(&gpd_list_lock);
- list_add(&genpd->gpd_list_node, &gpd_list);
- mutex_unlock(&gpd_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pm_genpd_init);
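[A minimal sketch of registering a domain with this function. The callbacks and the my_pd name are assumptions for illustration; simple_qos_governor is the governor defined in domain_governor.c and declared in linux/pm_domain.h.]

#include <linux/pm_domain.h>

static int my_pd_power_on(struct generic_pm_domain *domain)
{
	/* An assumed hardware enable sequence would go here. */
	return 0;
}

static int my_pd_power_off(struct generic_pm_domain *domain)
{
	/* An assumed hardware disable sequence would go here. */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name		= "my_pd",
	.power_on	= my_pd_power_on,
	.power_off	= my_pd_power_off,
};

static int __init my_pd_setup(void)
{
	/* Register the domain, initially powered on, with the simple
	 * QoS governor. */
	return pm_genpd_init(&my_pd, &simple_qos_governor, false);
}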
-
-static int genpd_remove(struct generic_pm_domain *genpd)
-{
- struct gpd_link *l, *link;
-
- if (IS_ERR_OR_NULL(genpd))
- return -EINVAL;
-
- genpd_lock(genpd);
-
- if (genpd->has_provider) {
- genpd_unlock(genpd);
- pr_err("Provider present, unable to remove %s\n", genpd->name);
- return -EBUSY;
- }
-
- if (!list_empty(&genpd->master_links) || genpd->device_count) {
- genpd_unlock(genpd);
- pr_err("%s: unable to remove %s\n", __func__, genpd->name);
- return -EBUSY;
- }
-
- list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
- list_del(&link->master_node);
- list_del(&link->slave_node);
- kfree(link);
- }
-
- list_del(&genpd->gpd_list_node);
- genpd_unlock(genpd);
- cancel_work_sync(&genpd->power_off_work);
- kfree(genpd->free);
- pr_debug("%s: removed %s\n", __func__, genpd->name);
-
- return 0;
-}
-
-/**
- * pm_genpd_remove - Remove a generic I/O PM domain
- * @genpd: Pointer to PM domain that is to be removed.
- *
- * To remove the PM domain, this function:
- * - Removes the PM domain as a subdomain to any parent domains,
- * if it was added.
- * - Removes the PM domain from the list of registered PM domains.
- *
- * The PM domain will only be removed if the associated provider has been
- * removed, it is not a parent to any other PM domain, and it has no devices
- * associated with it.
- */
-int pm_genpd_remove(struct generic_pm_domain *genpd)
-{
- int ret;
-
- mutex_lock(&gpd_list_lock);
- ret = genpd_remove(genpd);
- mutex_unlock(&gpd_list_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pm_genpd_remove);
-
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-
-/*
- * Device Tree based PM domain providers.
- *
- * The code below implements generic device tree based PM domain providers that
- * bind device tree nodes with generic PM domains registered in the system.
- *
- * Any driver that registers generic PM domains and needs to support binding of
- * devices to these domains is supposed to register a PM domain provider, which
- * maps a PM domain specifier retrieved from the device tree to a PM domain.
- *
- * Two simple mapping functions have been provided for convenience:
- * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
- * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
- * index.
- */
-
-/**
- * struct of_genpd_provider - PM domain provider registration structure
- * @link: Entry in global list of PM domain providers
- * @node: Pointer to device tree node of PM domain provider
- * @xlate: Provider-specific xlate callback mapping a set of specifier cells
- * into a PM domain.
- * @data: context pointer to be passed into @xlate callback
- */
-struct of_genpd_provider {
- struct list_head link;
- struct device_node *node;
- genpd_xlate_t xlate;
- void *data;
-};
-
-/* List of registered PM domain providers. */
-static LIST_HEAD(of_genpd_providers);
-/* Mutex to protect the list above. */
-static DEFINE_MUTEX(of_genpd_mutex);
-
-/**
- * genpd_xlate_simple() - Xlate function for direct node-domain mapping
- * @genpdspec: OF phandle args to map into a PM domain
- * @data: xlate function private data - pointer to struct generic_pm_domain
- *
- * This is a generic xlate function that can be used to model PM domains that
- * have their own device tree nodes. The private data of the xlate function
- * needs to be a valid pointer to struct generic_pm_domain.
- */
-static struct generic_pm_domain *genpd_xlate_simple(
- struct of_phandle_args *genpdspec,
- void *data)
-{
- return data;
-}
-
-/**
- * genpd_xlate_onecell() - Xlate function using a single index.
- * @genpdspec: OF phandle args to map into a PM domain
- * @data: xlate function private data - pointer to struct genpd_onecell_data
- *
- * This is a generic xlate function that can be used to model simple PM domain
- * controllers that have one device tree node and provide multiple PM domains.
- * A single cell is used as an index into an array of PM domains specified in
- * the genpd_onecell_data struct when registering the provider.
- */
-static struct generic_pm_domain *genpd_xlate_onecell(
- struct of_phandle_args *genpdspec,
- void *data)
-{
- struct genpd_onecell_data *genpd_data = data;
- unsigned int idx = genpdspec->args[0];
-
- if (genpdspec->args_count != 1)
- return ERR_PTR(-EINVAL);
-
- if (idx >= genpd_data->num_domains) {
- pr_err("%s: invalid domain index %u\n", __func__, idx);
- return ERR_PTR(-EINVAL);
- }
-
- if (!genpd_data->domains[idx])
- return ERR_PTR(-ENOENT);
-
- return genpd_data->domains[idx];
-}
-
-/**
- * genpd_add_provider() - Register a PM domain provider for a node
- * @np: Device node pointer associated with the PM domain provider.
- * @xlate: Callback for decoding PM domain from phandle arguments.
- * @data: Context pointer for @xlate callback.
- */
-static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
- void *data)
-{
- struct of_genpd_provider *cp;
-
- cp = kzalloc(sizeof(*cp), GFP_KERNEL);
- if (!cp)
- return -ENOMEM;
-
- cp->node = of_node_get(np);
- cp->data = data;
- cp->xlate = xlate;
-
- mutex_lock(&of_genpd_mutex);
- list_add(&cp->link, &of_genpd_providers);
- mutex_unlock(&of_genpd_mutex);
- pr_debug("Added domain provider from %pOF\n", np);
-
- return 0;
-}
-
-/**
- * of_genpd_add_provider_simple() - Register a simple PM domain provider
- * @np: Device node pointer associated with the PM domain provider.
- * @genpd: Pointer to PM domain associated with the PM domain provider.
- */
-int of_genpd_add_provider_simple(struct device_node *np,
- struct generic_pm_domain *genpd)
-{
- int ret = -EINVAL;
-
- if (!np || !genpd)
- return -EINVAL;
-
- mutex_lock(&gpd_list_lock);
-
- if (!genpd_present(genpd))
- goto unlock;
-
- genpd->dev.of_node = np;
-
- /* Parse genpd OPP table */
- if (genpd->set_performance_state) {
- ret = dev_pm_opp_of_add_table(&genpd->dev);
- if (ret) {
- dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
- ret);
- goto unlock;
- }
-
- /*
- * Save table for faster processing while setting performance
- * state.
- */
- genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
- WARN_ON(!genpd->opp_table);
- }
-
- ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
- if (ret) {
- if (genpd->set_performance_state) {
- dev_pm_opp_put_opp_table(genpd->opp_table);
- dev_pm_opp_of_remove_table(&genpd->dev);
- }
-
- goto unlock;
- }
-
- genpd->provider = &np->fwnode;
- genpd->has_provider = true;
-
-unlock:
- mutex_unlock(&gpd_list_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
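[A sketch of the usual pairing with pm_genpd_init() in a provider driver's probe; my_pd is assumed to be a statically defined domain and pdev the provider's platform device.]

static int my_provider_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_genpd_init(&my_pd, NULL, true);
	if (ret)
		return ret;

	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
	if (ret)
		pm_genpd_remove(&my_pd);

	return ret;
}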
-
-/**
- * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
- * @np: Device node pointer associated with the PM domain provider.
- * @data: Pointer to the data associated with the PM domain provider.
- */
-int of_genpd_add_provider_onecell(struct device_node *np,
- struct genpd_onecell_data *data)
-{
- struct generic_pm_domain *genpd;
- unsigned int i;
- int ret = -EINVAL;
-
- if (!np || !data)
- return -EINVAL;
-
- mutex_lock(&gpd_list_lock);
-
- if (!data->xlate)
- data->xlate = genpd_xlate_onecell;
-
- for (i = 0; i < data->num_domains; i++) {
- genpd = data->domains[i];
-
- if (!genpd)
- continue;
- if (!genpd_present(genpd))
- goto error;
-
- genpd->dev.of_node = np;
-
- /* Parse genpd OPP table */
- if (genpd->set_performance_state) {
- ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
- if (ret) {
- dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
- i, ret);
- goto error;
- }
-
- /*
- * Save table for faster processing while setting
- * performance state.
- */
- genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
- WARN_ON(!genpd->opp_table);
- }
-
- genpd->provider = &np->fwnode;
- genpd->has_provider = true;
- }
-
- ret = genpd_add_provider(np, data->xlate, data);
- if (ret < 0)
- goto error;
-
- mutex_unlock(&gpd_list_lock);
-
- return 0;
-
-error:
- while (i--) {
- genpd = data->domains[i];
-
- if (!genpd)
- continue;
-
- genpd->provider = NULL;
- genpd->has_provider = false;
-
- if (genpd->set_performance_state) {
- dev_pm_opp_put_opp_table(genpd->opp_table);
- dev_pm_opp_of_remove_table(&genpd->dev);
- }
- }
-
- mutex_unlock(&gpd_list_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
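[A sketch of a two-domain onecell provider; pd_a and pd_b are assumed to be domains already initialized with pm_genpd_init().]

static struct generic_pm_domain *my_domains[] = { &pd_a, &pd_b };

static struct genpd_onecell_data my_onecell_data = {
	.domains	= my_domains,
	.num_domains	= ARRAY_SIZE(my_domains),
	/* .xlate is left NULL, so genpd_xlate_onecell() is used. */
};

static int my_onecell_register(struct device_node *np)
{
	return of_genpd_add_provider_onecell(np, &my_onecell_data);
}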
-
-/**
- * of_genpd_del_provider() - Remove a previously registered PM domain provider
- * @np: Device node pointer associated with the PM domain provider
- */
-void of_genpd_del_provider(struct device_node *np)
-{
- struct of_genpd_provider *cp, *tmp;
- struct generic_pm_domain *gpd;
-
- mutex_lock(&gpd_list_lock);
- mutex_lock(&of_genpd_mutex);
- list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
- if (cp->node == np) {
- /*
- * For each PM domain associated with the
- * provider, set the 'has_provider' to false
- * so that the PM domain can be safely removed.
- */
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (gpd->provider == &np->fwnode) {
- gpd->has_provider = false;
-
- if (!gpd->set_performance_state)
- continue;
-
- dev_pm_opp_put_opp_table(gpd->opp_table);
- dev_pm_opp_of_remove_table(&gpd->dev);
- }
- }
-
- list_del(&cp->link);
- of_node_put(cp->node);
- kfree(cp);
- break;
- }
- }
- mutex_unlock(&of_genpd_mutex);
- mutex_unlock(&gpd_list_lock);
-}
-EXPORT_SYMBOL_GPL(of_genpd_del_provider);
-
-/**
- * genpd_get_from_provider() - Look-up PM domain
- * @genpdspec: OF phandle args to use for look-up
- *
- * Looks for a PM domain provider under the node specified by @genpdspec and,
- * if found, uses the xlate function of the provider to map the phandle args
- * to a PM domain.
- *
- * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
- * on failure.
- */
-static struct generic_pm_domain *genpd_get_from_provider(
- struct of_phandle_args *genpdspec)
-{
- struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
- struct of_genpd_provider *provider;
-
- if (!genpdspec)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&of_genpd_mutex);
-
- /* Check if we have such a provider in our array */
- list_for_each_entry(provider, &of_genpd_providers, link) {
- if (provider->node == genpdspec->np)
- genpd = provider->xlate(genpdspec, provider->data);
- if (!IS_ERR(genpd))
- break;
- }
-
- mutex_unlock(&of_genpd_mutex);
-
- return genpd;
-}
-
-/**
- * of_genpd_add_device() - Add a device to an I/O PM domain
- * @genpdspec: OF phandle args to use for look-up PM domain
- * @dev: Device to be added.
- *
- * Looks up an I/O PM domain based upon the phandle args provided and adds
- * the device to the PM domain. Returns a negative error code on failure.
- */
-int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
-{
- struct generic_pm_domain *genpd;
- int ret;
-
- mutex_lock(&gpd_list_lock);
-
- genpd = genpd_get_from_provider(genpdspec);
- if (IS_ERR(genpd)) {
- ret = PTR_ERR(genpd);
- goto out;
- }
-
- ret = genpd_add_device(genpd, dev, NULL);
-
-out:
- mutex_unlock(&gpd_list_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_genpd_add_device);
-
-/**
- * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
- * @parent_spec: OF phandle args to use for parent PM domain look-up
- * @subdomain_spec: OF phandle args to use for subdomain look-up
- *
- * Looks up a parent PM domain and a subdomain based upon the phandle args
- * provided and adds the subdomain to the parent PM domain. Returns a
- * negative error code on failure.
- */
-int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec)
-{
- struct generic_pm_domain *parent, *subdomain;
- int ret;
-
- mutex_lock(&gpd_list_lock);
-
- parent = genpd_get_from_provider(parent_spec);
- if (IS_ERR(parent)) {
- ret = PTR_ERR(parent);
- goto out;
- }
-
- subdomain = genpd_get_from_provider(subdomain_spec);
- if (IS_ERR(subdomain)) {
- ret = PTR_ERR(subdomain);
- goto out;
- }
-
- ret = genpd_add_subdomain(parent, subdomain);
-
-out:
- mutex_unlock(&gpd_list_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
-
-/**
- * of_genpd_remove_last - Remove the last PM domain registered for a provider
- * @np: Pointer to the device node associated with the provider
- *
- * Find the last PM domain that was added by a particular provider and
- * remove this PM domain from the list of PM domains. The provider is
- * identified by the device node that is passed in. The PM domain will
- * only be removed if the provider associated with the domain has been
- * removed.
- *
- * Returns a valid pointer to struct generic_pm_domain on success or
- * ERR_PTR() on failure.
- */
-struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
-{
- struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
- int ret;
-
- if (IS_ERR_OR_NULL(np))
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&gpd_list_lock);
- list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
- if (gpd->provider == &np->fwnode) {
- ret = genpd_remove(gpd);
- genpd = ret ? ERR_PTR(ret) : gpd;
- break;
- }
- }
- mutex_unlock(&gpd_list_lock);
-
- return genpd;
-}
-EXPORT_SYMBOL_GPL(of_genpd_remove_last);
-
-static void genpd_release_dev(struct device *dev)
-{
- kfree(dev);
-}
-
-static struct bus_type genpd_bus_type = {
- .name = "genpd",
-};
-
-/**
- * genpd_dev_pm_detach - Detach a device from its PM domain.
- * @dev: Device to detach.
- * @power_off: Currently not used
- *
- * Try to locate a corresponding generic PM domain, which the device was
- * attached to previously. If such is found, the device is detached from it.
- */
-static void genpd_dev_pm_detach(struct device *dev, bool power_off)
-{
- struct generic_pm_domain *pd;
- unsigned int i;
- int ret = 0;
-
- pd = dev_to_genpd(dev);
- if (IS_ERR(pd))
- return;
-
- dev_dbg(dev, "removing from PM domain %s\n", pd->name);
-
- for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
- ret = genpd_remove_device(pd, dev);
- if (ret != -EAGAIN)
- break;
-
- mdelay(i);
- cond_resched();
- }
-
- if (ret < 0) {
-		dev_err(dev, "failed to remove from PM domain %s: %d\n",
- pd->name, ret);
- return;
- }
-
- /* Check if PM domain can be powered off after removing this device. */
- genpd_queue_power_off_work(pd);
-
- /* Unregister the device if it was created by genpd. */
- if (dev->bus == &genpd_bus_type)
- device_unregister(dev);
-}
-
-static void genpd_dev_pm_sync(struct device *dev)
-{
- struct generic_pm_domain *pd;
-
- pd = dev_to_genpd(dev);
- if (IS_ERR(pd))
- return;
-
- genpd_queue_power_off_work(pd);
-}
-
-static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
- unsigned int index, bool power_on)
-{
- struct of_phandle_args pd_args;
- struct generic_pm_domain *pd;
- int ret;
-
- ret = of_parse_phandle_with_args(np, "power-domains",
- "#power-domain-cells", index, &pd_args);
- if (ret < 0)
- return ret;
-
- mutex_lock(&gpd_list_lock);
- pd = genpd_get_from_provider(&pd_args);
- of_node_put(pd_args.np);
- if (IS_ERR(pd)) {
- mutex_unlock(&gpd_list_lock);
- dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
- __func__, PTR_ERR(pd));
- return driver_deferred_probe_check_state(dev);
- }
-
- dev_dbg(dev, "adding to PM domain %s\n", pd->name);
-
- ret = genpd_add_device(pd, dev, NULL);
- mutex_unlock(&gpd_list_lock);
-
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to add to PM domain %s: %d\n",
- pd->name, ret);
- return ret;
- }
-
- dev->pm_domain->detach = genpd_dev_pm_detach;
- dev->pm_domain->sync = genpd_dev_pm_sync;
-
- if (power_on) {
- genpd_lock(pd);
- ret = genpd_power_on(pd, 0);
- genpd_unlock(pd);
- }
-
- if (ret)
- genpd_remove_device(pd, dev);
-
- return ret ? -EPROBE_DEFER : 1;
-}
-
-/**
- * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
- * @dev: Device to attach.
- *
- * Parse device's OF node to find a PM domain specifier. If such is found,
- * attaches the device to retrieved pm_domain ops.
- *
- * Returns 1 when a PM domain has been attached successfully, 0 when the
- * device doesn't need a PM domain or when multiple power-domains exist for
- * it, else a negative error code. Note that if a power-domain exists for the
- * device, but it cannot be found or turned on, -EPROBE_DEFER is returned to
- * ensure that the device is not probed and to retry again later.
- */
-int genpd_dev_pm_attach(struct device *dev)
-{
- if (!dev->of_node)
- return 0;
-
- /*
- * Devices with multiple PM domains must be attached separately, as we
- * can only attach one PM domain per device.
- */
- if (of_count_phandle_with_args(dev->of_node, "power-domains",
- "#power-domain-cells") != 1)
- return 0;
-
- return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
-}
-EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
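[The tri-state return contract matters to callers, which are typically bus code; a hedged sketch of handling it:]

static int my_bus_attach(struct device *dev)
{
	int ret = genpd_dev_pm_attach(dev);

	if (ret < 0)
		return ret;	/* may be -EPROBE_DEFER: do not probe yet */

	/* ret == 1: attached; ret == 0: no single PM domain, nothing done. */
	return 0;
}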
-
-/**
- * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
- * @dev: The device used to lookup the PM domain.
- * @index: The index of the PM domain.
- *
- * Parse device's OF node to find a PM domain specifier at the provided @index.
- * If such is found, creates a virtual device and attaches it to the retrieved
- * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
- * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
- *
- * Returns the created virtual device if the PM domain was attached
- * successfully, NULL when the device doesn't need a PM domain, else an
- * ERR_PTR() in case of failure. If a power-domain exists for the device, but
- * cannot be found or turned on, ERR_PTR(-EPROBE_DEFER) is returned to ensure
- * that the device is not probed and to retry again later.
- */
-struct device *genpd_dev_pm_attach_by_id(struct device *dev,
- unsigned int index)
-{
- struct device *virt_dev;
- int num_domains;
- int ret;
-
- if (!dev->of_node)
- return NULL;
-
- /* Deal only with devices using multiple PM domains. */
- num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
- "#power-domain-cells");
- if (num_domains < 2 || index >= num_domains)
- return NULL;
-
- /* Allocate and register device on the genpd bus. */
- virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
- if (!virt_dev)
- return ERR_PTR(-ENOMEM);
-
- dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
- virt_dev->bus = &genpd_bus_type;
- virt_dev->release = genpd_release_dev;
-
- ret = device_register(virt_dev);
- if (ret) {
- kfree(virt_dev);
- return ERR_PTR(ret);
- }
-
- /* Try to attach the device to the PM domain at the specified index. */
- ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
- if (ret < 1) {
- device_unregister(virt_dev);
- return ret ? ERR_PTR(ret) : NULL;
- }
-
- pm_runtime_enable(virt_dev);
- genpd_queue_power_off_work(dev_to_genpd(virt_dev));
-
- return virt_dev;
-}
-EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
-
-/**
- * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
- * @dev: The device used to lookup the PM domain.
- * @name: The name of the PM domain.
- *
- * Parse device's OF node to find a PM domain specifier using the
- * power-domain-names DT property. For further description see
- * genpd_dev_pm_attach_by_id().
- */
-struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
-{
- int index;
-
- if (!dev->of_node)
- return NULL;
-
- index = of_property_match_string(dev->of_node, "power-domain-names",
- name);
- if (index < 0)
- return NULL;
-
- return genpd_dev_pm_attach_by_id(dev, index);
-}
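[A usage sketch for the by-name variant; "perf" is a hypothetical power-domain-names entry, not one defined by this file.]

static int my_attach_perf_domain(struct device *dev)
{
	struct device *pd_dev = genpd_dev_pm_attach_by_name(dev, "perf");

	if (IS_ERR(pd_dev))
		return PTR_ERR(pd_dev);	/* may be -EPROBE_DEFER */

	/* NULL means no PM domain is needed; otherwise detach later with
	 * dev_pm_domain_detach(pd_dev, true). */
	return 0;
}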
-
-static const struct of_device_id idle_state_match[] = {
- { .compatible = "domain-idle-state", },
- { }
-};
-
-static int genpd_parse_state(struct genpd_power_state *genpd_state,
- struct device_node *state_node)
-{
- int err;
- u32 residency;
- u32 entry_latency, exit_latency;
-
- err = of_property_read_u32(state_node, "entry-latency-us",
- &entry_latency);
- if (err) {
- pr_debug(" * %pOF missing entry-latency-us property\n",
- state_node);
- return -EINVAL;
- }
-
- err = of_property_read_u32(state_node, "exit-latency-us",
- &exit_latency);
- if (err) {
- pr_debug(" * %pOF missing exit-latency-us property\n",
- state_node);
- return -EINVAL;
- }
-
- err = of_property_read_u32(state_node, "min-residency-us", &residency);
- if (!err)
- genpd_state->residency_ns = 1000 * residency;
-
- genpd_state->power_on_latency_ns = 1000 * exit_latency;
- genpd_state->power_off_latency_ns = 1000 * entry_latency;
- genpd_state->fwnode = &state_node->fwnode;
-
- return 0;
-}
-
-static int genpd_iterate_idle_states(struct device_node *dn,
- struct genpd_power_state *states)
-{
- int ret;
- struct of_phandle_iterator it;
- struct device_node *np;
- int i = 0;
-
- ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
- if (ret <= 0)
- return ret;
-
-	/* Loop over the phandles until all the requested entries are found */
- of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
- np = it.node;
- if (!of_match_node(idle_state_match, np))
- continue;
- if (states) {
- ret = genpd_parse_state(&states[i], np);
- if (ret) {
- pr_err("Parsing idle state node %pOF failed with err %d\n",
- np, ret);
- of_node_put(np);
- return ret;
- }
- }
- i++;
- }
-
- return i;
-}
-
-/**
- * of_genpd_parse_idle_states: Return array of idle states for the genpd.
- *
- * @dn: The genpd device node
- * @states: The pointer to which the state array will be saved.
- * @n: The count of elements in the array returned from this function.
- *
- * Returns the device states parsed from the OF node. The memory for the
- * states is allocated by this function and it is the responsibility of the
- * caller to free it after use. If zero or more compatible domain idle states
- * are found, 0 is returned; in case of errors, a negative error code is
- * returned.
- */
-int of_genpd_parse_idle_states(struct device_node *dn,
- struct genpd_power_state **states, int *n)
-{
- struct genpd_power_state *st;
- int ret;
-
- ret = genpd_iterate_idle_states(dn, NULL);
- if (ret < 0)
- return ret;
-
- if (!ret) {
- *states = NULL;
- *n = 0;
- return 0;
- }
-
- st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
- if (!st)
- return -ENOMEM;
-
- ret = genpd_iterate_idle_states(dn, st);
- if (ret <= 0) {
- kfree(st);
- return ret < 0 ? ret : -EINVAL;
- }
-
- *states = st;
- *n = ret;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
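[A sketch of wiring the parsed states into a domain before pm_genpd_init(); dn and the helper name are illustrative.]

static int my_pd_add_states(struct device_node *dn,
			    struct generic_pm_domain *pd)
{
	struct genpd_power_state *states;
	int state_count;
	int ret;

	ret = of_genpd_parse_idle_states(dn, &states, &state_count);
	if (ret)
		return ret;

	if (state_count) {
		/* Assign before pm_genpd_init(); the memory stays
		 * caller-owned. */
		pd->states = states;
		pd->state_count = state_count;
	}

	return 0;
}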
-
-/**
- * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
- *
- * @genpd_dev: Genpd's device for which the performance-state needs to be found.
- * @opp: struct dev_pm_opp of the OPP for which we need to find performance
- * state.
- *
- * Returns the performance state encoded in the OPP of the genpd. This calls
- * the platform-specific genpd->opp_to_performance_state() callback to
- * translate a power domain OPP to a performance state.
- *
- * Returns performance state on success and 0 on failure.
- */
-unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
- struct dev_pm_opp *opp)
-{
- struct generic_pm_domain *genpd = NULL;
- int state;
-
- genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
-
- if (unlikely(!genpd->opp_to_performance_state))
- return 0;
-
- genpd_lock(genpd);
- state = genpd->opp_to_performance_state(genpd, opp);
- genpd_unlock(genpd);
-
- return state;
-}
-EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
-
-static int __init genpd_bus_init(void)
-{
- return bus_register(&genpd_bus_type);
-}
-core_initcall(genpd_bus_init);
-
-#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
-
-
-/*** debugfs support ***/
-
-#ifdef CONFIG_DEBUG_FS
-#include <linux/pm.h>
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/kobject.h>
-static struct dentry *genpd_debugfs_dir;
-
-/*
- * TODO: This function is a slightly modified version of rtpm_status_show
- * from sysfs.c, so generalize it.
- */
-static void rtpm_status_str(struct seq_file *s, struct device *dev)
-{
- static const char * const status_lookup[] = {
- [RPM_ACTIVE] = "active",
- [RPM_RESUMING] = "resuming",
- [RPM_SUSPENDED] = "suspended",
- [RPM_SUSPENDING] = "suspending"
- };
- const char *p = "";
-
- if (dev->power.runtime_error)
- p = "error";
- else if (dev->power.disable_depth)
- p = "unsupported";
- else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
- p = status_lookup[dev->power.runtime_status];
- else
- WARN_ON(1);
-
- seq_puts(s, p);
-}
-
-static int genpd_summary_one(struct seq_file *s,
- struct generic_pm_domain *genpd)
-{
- static const char * const status_lookup[] = {
- [GPD_STATE_ACTIVE] = "on",
- [GPD_STATE_POWER_OFF] = "off"
- };
- struct pm_domain_data *pm_data;
- const char *kobj_path;
- struct gpd_link *link;
- char state[16];
- int ret;
-
- ret = genpd_lock_interruptible(genpd);
- if (ret)
- return -ERESTARTSYS;
-
- if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
- goto exit;
- if (!genpd_status_on(genpd))
- snprintf(state, sizeof(state), "%s-%u",
- status_lookup[genpd->status], genpd->state_idx);
- else
- snprintf(state, sizeof(state), "%s",
- status_lookup[genpd->status]);
- seq_printf(s, "%-30s %-15s ", genpd->name, state);
-
- /*
- * Modifications on the list require holding locks on both
- * master and slave, so we are safe.
- * Also genpd->name is immutable.
- */
- list_for_each_entry(link, &genpd->master_links, master_node) {
- seq_printf(s, "%s", link->slave->name);
- if (!list_is_last(&link->master_node, &genpd->master_links))
- seq_puts(s, ", ");
- }
-
- list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
- kobj_path = kobject_get_path(&pm_data->dev->kobj,
- genpd_is_irq_safe(genpd) ?
- GFP_ATOMIC : GFP_KERNEL);
- if (kobj_path == NULL)
- continue;
-
- seq_printf(s, "\n %-50s ", kobj_path);
- rtpm_status_str(s, pm_data->dev);
- kfree(kobj_path);
- }
-
- seq_puts(s, "\n");
-exit:
- genpd_unlock(genpd);
-
- return 0;
-}
-
-static int summary_show(struct seq_file *s, void *data)
-{
- struct generic_pm_domain *genpd;
- int ret = 0;
-
- seq_puts(s, "domain status slaves\n");
- seq_puts(s, " /device runtime status\n");
- seq_puts(s, "----------------------------------------------------------------------\n");
-
- ret = mutex_lock_interruptible(&gpd_list_lock);
- if (ret)
- return -ERESTARTSYS;
-
- list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- ret = genpd_summary_one(s, genpd);
- if (ret)
- break;
- }
- mutex_unlock(&gpd_list_lock);
-
- return ret;
-}
-
-static int status_show(struct seq_file *s, void *data)
-{
- static const char * const status_lookup[] = {
- [GPD_STATE_ACTIVE] = "on",
- [GPD_STATE_POWER_OFF] = "off"
- };
-
- struct generic_pm_domain *genpd = s->private;
- int ret = 0;
-
- ret = genpd_lock_interruptible(genpd);
- if (ret)
- return -ERESTARTSYS;
-
- if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
- goto exit;
-
- if (genpd->status == GPD_STATE_POWER_OFF)
- seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
- genpd->state_idx);
- else
- seq_printf(s, "%s\n", status_lookup[genpd->status]);
-exit:
- genpd_unlock(genpd);
- return ret;
-}
-
-static int sub_domains_show(struct seq_file *s, void *data)
-{
- struct generic_pm_domain *genpd = s->private;
- struct gpd_link *link;
- int ret = 0;
-
- ret = genpd_lock_interruptible(genpd);
- if (ret)
- return -ERESTARTSYS;
-
- list_for_each_entry(link, &genpd->master_links, master_node)
- seq_printf(s, "%s\n", link->slave->name);
-
- genpd_unlock(genpd);
- return ret;
-}
-
-static int idle_states_show(struct seq_file *s, void *data)
-{
- struct generic_pm_domain *genpd = s->private;
- unsigned int i;
- int ret = 0;
-
- ret = genpd_lock_interruptible(genpd);
- if (ret)
- return -ERESTARTSYS;
-
- seq_puts(s, "State Time Spent(ms)\n");
-
- for (i = 0; i < genpd->state_count; i++) {
- ktime_t delta = 0;
- s64 msecs;
-
- if ((genpd->status == GPD_STATE_POWER_OFF) &&
- (genpd->state_idx == i))
- delta = ktime_sub(ktime_get(), genpd->accounting_time);
-
- msecs = ktime_to_ms(
- ktime_add(genpd->states[i].idle_time, delta));
- seq_printf(s, "S%-13i %lld\n", i, msecs);
- }
-
- genpd_unlock(genpd);
- return ret;
-}
-
-static int active_time_show(struct seq_file *s, void *data)
-{
- struct generic_pm_domain *genpd = s->private;
- ktime_t delta = 0;
- int ret = 0;
-
- ret = genpd_lock_interruptible(genpd);
- if (ret)
- return -ERESTARTSYS;
-
- if (genpd->status == GPD_STATE_ACTIVE)
- delta = ktime_sub(ktime_get(), genpd->accounting_time);
-
- seq_printf(s, "%lld ms\n", ktime_to_ms(
- ktime_add(genpd->on_time, delta)));
-
- genpd_unlock(genpd);
- return ret;
-}
-
-static int total_idle_time_show(struct seq_file *s, void *data)
-{
- struct generic_pm_domain *genpd = s->private;
- ktime_t delta = 0, total = 0;
- unsigned int i;
- int ret = 0;
-
- ret = genpd_lock_interruptible(genpd);
- if (ret)
- return -ERESTARTSYS;
-
- for (i = 0; i < genpd->state_count; i++) {
-
- if ((genpd->status == GPD_STATE_POWER_OFF) &&
- (genpd->state_idx == i))
- delta = ktime_sub(ktime_get(), genpd->accounting_time);
-
- total = ktime_add(total, genpd->states[i].idle_time);
- }
- total = ktime_add(total, delta);
-
- seq_printf(s, "%lld ms\n", ktime_to_ms(total));
-
- genpd_unlock(genpd);
- return ret;
-}
-
-static int devices_show(struct seq_file *s, void *data)
-{
- struct generic_pm_domain *genpd = s->private;
- struct pm_domain_data *pm_data;
- const char *kobj_path;
- int ret = 0;
-
- ret = genpd_lock_interruptible(genpd);
- if (ret)
- return -ERESTARTSYS;
-
- list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
- kobj_path = kobject_get_path(&pm_data->dev->kobj,
- genpd_is_irq_safe(genpd) ?
- GFP_ATOMIC : GFP_KERNEL);
- if (kobj_path == NULL)
- continue;
-
- seq_printf(s, "%s\n", kobj_path);
- kfree(kobj_path);
- }
-
- genpd_unlock(genpd);
- return ret;
-}
-
-static int perf_state_show(struct seq_file *s, void *data)
-{
- struct generic_pm_domain *genpd = s->private;
-
- if (genpd_lock_interruptible(genpd))
- return -ERESTARTSYS;
-
- seq_printf(s, "%u\n", genpd->performance_state);
-
- genpd_unlock(genpd);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(summary);
-DEFINE_SHOW_ATTRIBUTE(status);
-DEFINE_SHOW_ATTRIBUTE(sub_domains);
-DEFINE_SHOW_ATTRIBUTE(idle_states);
-DEFINE_SHOW_ATTRIBUTE(active_time);
-DEFINE_SHOW_ATTRIBUTE(total_idle_time);
-DEFINE_SHOW_ATTRIBUTE(devices);
-DEFINE_SHOW_ATTRIBUTE(perf_state);
-
-static int __init genpd_debug_init(void)
-{
- struct dentry *d;
- struct generic_pm_domain *genpd;
-
- genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
-
- if (!genpd_debugfs_dir)
- return -ENOMEM;
-
-	d = debugfs_create_file("pm_genpd_summary", 0444,
-			genpd_debugfs_dir, NULL, &summary_fops);
- if (!d)
- return -ENOMEM;
-
- list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
- if (!d)
- return -ENOMEM;
-
- debugfs_create_file("current_state", 0444,
- d, genpd, &status_fops);
- debugfs_create_file("sub_domains", 0444,
- d, genpd, &sub_domains_fops);
- debugfs_create_file("idle_states", 0444,
- d, genpd, &idle_states_fops);
- debugfs_create_file("active_time", 0444,
- d, genpd, &active_time_fops);
- debugfs_create_file("total_idle_time", 0444,
- d, genpd, &total_idle_time_fops);
- debugfs_create_file("devices", 0444,
- d, genpd, &devices_fops);
- if (genpd->set_performance_state)
- debugfs_create_file("perf_state", 0444,
- d, genpd, &perf_state_fops);
- }
-
- return 0;
-}
-late_initcall(genpd_debug_init);
-
-static void __exit genpd_debug_exit(void)
-{
- debugfs_remove_recursive(genpd_debugfs_dir);
-}
-__exitcall(genpd_debug_exit);
-#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
deleted file mode 100644
index 99896fbf18e4..000000000000
--- a/drivers/base/power/domain_governor.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * drivers/base/power/domain_governor.c - Governors for device PM domains.
- *
- * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/kernel.h>
-#include <linux/pm_domain.h>
-#include <linux/pm_qos.h>
-#include <linux/hrtimer.h>
-
-static int dev_update_qos_constraint(struct device *dev, void *data)
-{
- s64 *constraint_ns_p = data;
- s64 constraint_ns;
-
- if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
- /*
- * Only take suspend-time QoS constraints of devices into
- * account, because constraints updated after the device has
- * been suspended are not guaranteed to be taken into account
- * anyway. In order for them to take effect, the device has to
- * be resumed and suspended again.
- */
- constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
- } else {
- /*
- * The child is not in a domain and there's no info on its
- * suspend/resume latencies, so assume them to be negligible and
- * take its current PM QoS constraint (that's the only thing
- * known at this point anyway).
- */
- constraint_ns = dev_pm_qos_read_value(dev);
- constraint_ns *= NSEC_PER_USEC;
- }
-
- if (constraint_ns < *constraint_ns_p)
- *constraint_ns_p = constraint_ns;
-
- return 0;
-}
-
-/**
- * default_suspend_ok - Default PM domain governor routine to suspend devices.
- * @dev: Device to check.
- */
-static bool default_suspend_ok(struct device *dev)
-{
- struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
- unsigned long flags;
- s64 constraint_ns;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- if (!td->constraint_changed) {
- bool ret = td->cached_suspend_ok;
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
- return ret;
- }
- td->constraint_changed = false;
- td->cached_suspend_ok = false;
- td->effective_constraint_ns = 0;
- constraint_ns = __dev_pm_qos_read_value(dev);
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
- if (constraint_ns == 0)
- return false;
-
- constraint_ns *= NSEC_PER_USEC;
- /*
- * We can walk the children without any additional locking, because
- * they all have been suspended at this point and their
- * effective_constraint_ns fields won't be modified in parallel with us.
- */
- if (!dev->power.ignore_children)
- device_for_each_child(dev, &constraint_ns,
- dev_update_qos_constraint);
-
- if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
- /* "No restriction", so the device is allowed to suspend. */
- td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
- td->cached_suspend_ok = true;
- } else if (constraint_ns == 0) {
- /*
-		 * This triggers if one of the children that don't belong to a
-		 * domain has a zero PM QoS constraint, in which case it is
-		 * better not to suspend. effective_constraint_ns is zero
-		 * already and cached_suspend_ok is false, so bail out.
- */
- return false;
- } else {
- constraint_ns -= td->suspend_latency_ns +
- td->resume_latency_ns;
- /*
- * effective_constraint_ns is zero already and cached_suspend_ok
- * is false, so if the computed value is not positive, return
- * right away.
- */
- if (constraint_ns <= 0)
- return false;
-
- td->effective_constraint_ns = constraint_ns;
- td->cached_suspend_ok = true;
- }
-
- /*
- * The children have been suspended already, so we don't need to take
- * their suspend latencies into account here.
- */
- return td->cached_suspend_ok;
-}
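[To make the budget arithmetic above concrete, a worked example with assumed numbers:]

/* A 100 us resume-latency QoS constraint gives constraint_ns = 100000.
 * With suspend_latency_ns = 20000 and resume_latency_ns = 30000, the
 * effective constraint is 100000 - (20000 + 30000) = 50000 ns, which is
 * positive, so the device may suspend and the domain governor later sees
 * a 50 us budget in effective_constraint_ns.
 */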
-
-static bool __default_power_down_ok(struct dev_pm_domain *pd,
- unsigned int state)
-{
- struct generic_pm_domain *genpd = pd_to_genpd(pd);
- struct gpd_link *link;
- struct pm_domain_data *pdd;
- s64 min_off_time_ns;
- s64 off_on_time_ns;
-
- off_on_time_ns = genpd->states[state].power_off_latency_ns +
- genpd->states[state].power_on_latency_ns;
-
- min_off_time_ns = -1;
- /*
- * Check if subdomains can be off for enough time.
- *
- * All subdomains have been powered off already at this point.
- */
- list_for_each_entry(link, &genpd->master_links, master_node) {
- struct generic_pm_domain *sd = link->slave;
- s64 sd_max_off_ns = sd->max_off_time_ns;
-
- if (sd_max_off_ns < 0)
- continue;
-
- /*
- * Check if the subdomain is allowed to be off long enough for
- * the current domain to turn off and on (that's how much time
- * it will have to wait worst case).
- */
- if (sd_max_off_ns <= off_on_time_ns)
- return false;
-
- if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
- min_off_time_ns = sd_max_off_ns;
- }
-
- /*
- * Check if the devices in the domain can be off enough time.
- */
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- struct gpd_timing_data *td;
- s64 constraint_ns;
-
- /*
- * Check if the device is allowed to be off long enough for the
- * domain to turn off and on (that's how much time it will
- * have to wait worst case).
- */
- td = &to_gpd_data(pdd)->td;
- constraint_ns = td->effective_constraint_ns;
- /*
- * Zero means "no suspend at all" and this runs only when all
- * devices in the domain are suspended, so it must be positive.
- */
- if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
- continue;
-
- if (constraint_ns <= off_on_time_ns)
- return false;
-
- if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
- min_off_time_ns = constraint_ns;
- }
-
- /*
- * If the computed minimum device off time is negative, there are no
- * latency constraints, so the domain can spend arbitrary time in the
- * "off" state.
- */
- if (min_off_time_ns < 0)
- return true;
-
- /*
- * The difference between the computed minimum subdomain or device off
- * time and the time needed to turn the domain on is the maximum
- * theoretical time this domain can spend in the "off" state.
- */
- genpd->max_off_time_ns = min_off_time_ns -
- genpd->states[state].power_on_latency_ns;
- return true;
-}
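[Illustrative numbers for the checks above; the values are assumptions, not taken from any real platform:]

/* With power_off_latency_ns = 1000000 and power_on_latency_ns = 2000000,
 * off_on_time_ns = 3000000. A device whose effective constraint is
 * 5000000 ns passes the comparison, and max_off_time_ns becomes
 * 5000000 - 2000000 = 3000000 ns for this state.
 */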
-
-/**
- * default_power_down_ok - Default generic PM domain power off governor routine.
- * @pd: PM domain to check.
- *
- * This routine must be executed under the PM domain's lock.
- */
-static bool default_power_down_ok(struct dev_pm_domain *pd)
-{
- struct generic_pm_domain *genpd = pd_to_genpd(pd);
- struct gpd_link *link;
-
- if (!genpd->max_off_time_changed)
- return genpd->cached_power_down_ok;
-
- /*
- * We have to invalidate the cached results for the masters, so
- * use the observation that default_power_down_ok() is not
- * going to be called for any master until this instance
- * returns.
- */
- list_for_each_entry(link, &genpd->slave_links, slave_node)
- link->master->max_off_time_changed = true;
-
- genpd->max_off_time_ns = -1;
- genpd->max_off_time_changed = false;
- genpd->cached_power_down_ok = true;
- genpd->state_idx = genpd->state_count - 1;
-
- /* Find a state to power down to, starting from the deepest. */
- while (!__default_power_down_ok(pd, genpd->state_idx)) {
- if (genpd->state_idx == 0) {
- genpd->cached_power_down_ok = false;
- break;
- }
- genpd->state_idx--;
- }
-
- return genpd->cached_power_down_ok;
-}
-
-static bool always_on_power_down_ok(struct dev_pm_domain *domain)
-{
- return false;
-}
-
-struct dev_power_governor simple_qos_governor = {
- .suspend_ok = default_suspend_ok,
- .power_down_ok = default_power_down_ok,
-};
-
-/**
- * pm_domain_always_on_gov - A governor implementing an always-on policy
- */
-struct dev_power_governor pm_domain_always_on_gov = {
- .power_down_ok = always_on_power_down_ok,
- .suspend_ok = default_suspend_ok,
-};
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index b2ed606265a8..af99bbcf281c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -1,15 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
*
* Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
+#define CALL_PM_OP(dev, op) \
+({ \
+ struct device *_dev = (dev); \
+ const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL; \
+ pm && pm->op ? pm->op(_dev) : 0; \
+})
+
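[Conceptually, CALL_PM_OP(dev, suspend) evaluates to the following statement-expression (a sketch of the expansion):]

/*
 *	struct device *_dev = (dev);
 *	const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL;
 *	pm && pm->suspend ? pm->suspend(_dev) : 0;
 */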
#ifdef CONFIG_PM
/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
@@ -21,12 +26,7 @@
*/
int pm_generic_runtime_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
-
- ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
-
- return ret;
+ return CALL_PM_OP(dev, runtime_suspend);
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
@@ -40,12 +40,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
*/
int pm_generic_runtime_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
-
- ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
-
- return ret;
+ return CALL_PM_OP(dev, runtime_resume);
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
#endif /* CONFIG_PM */
@@ -74,9 +69,7 @@ int pm_generic_prepare(struct device *dev)
*/
int pm_generic_suspend_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+ return CALL_PM_OP(dev, suspend_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
@@ -86,9 +79,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
*/
int pm_generic_suspend_late(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+ return CALL_PM_OP(dev, suspend_late);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
@@ -98,9 +89,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
*/
int pm_generic_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend ? pm->suspend(dev) : 0;
+ return CALL_PM_OP(dev, suspend);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
@@ -110,33 +99,17 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
*/
int pm_generic_freeze_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+ return CALL_PM_OP(dev, freeze_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
/**
- * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
- * @dev: Device to freeze.
- */
-int pm_generic_freeze_late(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
-
-/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze ? pm->freeze(dev) : 0;
+ return CALL_PM_OP(dev, freeze);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
@@ -146,9 +119,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
*/
int pm_generic_poweroff_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+ return CALL_PM_OP(dev, poweroff_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
@@ -158,9 +129,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
*/
int pm_generic_poweroff_late(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+ return CALL_PM_OP(dev, poweroff_late);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
@@ -170,9 +139,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
*/
int pm_generic_poweroff(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+ return CALL_PM_OP(dev, poweroff);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
@@ -182,33 +149,17 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
*/
int pm_generic_thaw_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+ return CALL_PM_OP(dev, thaw_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
/**
- * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
- * @dev: Device to thaw.
- */
-int pm_generic_thaw_early(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
-
-/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw ? pm->thaw(dev) : 0;
+ return CALL_PM_OP(dev, thaw);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -218,9 +169,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
*/
int pm_generic_resume_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+ return CALL_PM_OP(dev, resume_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
@@ -230,9 +179,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
*/
int pm_generic_resume_early(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+ return CALL_PM_OP(dev, resume_early);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);
@@ -242,9 +189,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_early);
*/
int pm_generic_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume ? pm->resume(dev) : 0;
+ return CALL_PM_OP(dev, resume);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -254,9 +199,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+ return CALL_PM_OP(dev, restore_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
@@ -266,9 +209,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
*/
int pm_generic_restore_early(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+ return CALL_PM_OP(dev, restore_early);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);
@@ -278,9 +219,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_early);
*/
int pm_generic_restore(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore ? pm->restore(dev) : 0;
+ return CALL_PM_OP(dev, restore);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
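These generic helpers exist so middle layers can simply forward system sleep
and runtime PM transitions to the driver's dev_pm_ops. A sketch of how a bus
type might wire them up (the helpers and macros are the real ones from this
file; foo_bus is illustrative):

	#include <linux/device.h>
	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static const struct dev_pm_ops foo_bus_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
		SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
				   pm_generic_runtime_resume, NULL)
	};

	/* Devices on this bus fall back to their drivers' dev_pm_ops. */
	struct bus_type foo_bus_type = {
		.name = "foo",
		.pm   = &foo_bus_pm_ops,
	};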
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0992e67e862b..97a8b4fcf471 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/main.c - Where the driver meets power management.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
- * This file is released under the GPLv2
- *
- *
* The driver model core calls device_pm_add() when a device is registered.
* This will initialize the embedded device_pm_info object in the device
* and add it to the list of power-controlled devices. sysfs entries for
@@ -17,6 +15,9 @@
* subsystem list maintains.
*/
+#define pr_fmt(fmt) "PM: " fmt
+#define dev_fmt pr_fmt
+
#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
@@ -31,9 +32,9 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
-#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
+#include <linux/nmi.h>
#include "../base.h"
#include "power.h"
@@ -56,12 +57,26 @@ static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
-struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
+/**
+ * pm_hibernate_is_recovering - Report whether hibernation is recovering from an error.
+ *
+ * Used to query whether dev_pm_ops.thaw() is being invoked for the normal
+ * hibernation case or in order to recover from an error.
+ *
+ * Return: true in the error-recovery case, false in the normal case.
+ */
+bool pm_hibernate_is_recovering(void)
+{
+ return pm_transition.event == PM_EVENT_RECOVER;
+}
+EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
+
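A hypothetical driver-side use of the new export, assuming a driver-private
struct foo_dev and a foo_reset_hw() helper (both illustrative):

	static int foo_thaw(struct device *dev)
	{
		struct foo_dev *fd = dev_get_drvdata(dev);

		/*
		 * On the error-recovery path the hibernation image was never
		 * written out, so the pre-freeze hardware state is still
		 * valid and a light reset suffices; otherwise fully
		 * re-initialize the device.
		 */
		if (pm_hibernate_is_recovering())
			return foo_reset_hw(fd, false);

		return foo_reset_hw(fd, true);
	}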
static const char *pm_verb(int event)
{
switch (event) {
@@ -81,6 +96,8 @@ static const char *pm_verb(int event)
return "restore";
case PM_EVENT_RECOVER:
return "recover";
+ case PM_EVENT_POWEROFF:
+ return "poweroff";
default:
return "(unknown PM event)";
}
@@ -124,7 +141,11 @@ void device_pm_unlock(void)
*/
void device_pm_add(struct device *dev)
{
- pr_debug("PM: Adding info for %s:%s\n",
+ /* Skip PM setup/initialization. */
+ if (device_pm_not_required(dev))
+ return;
+
+ pr_debug("Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
device_pm_check_callbacks(dev);
mutex_lock(&dpm_list_mtx);
@@ -142,7 +163,10 @@ void device_pm_add(struct device *dev)
*/
void device_pm_remove(struct device *dev)
{
- pr_debug("PM: Removing info for %s:%s\n",
+ if (device_pm_not_required(dev))
+ return;
+
+ pr_debug("Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
@@ -161,7 +185,7 @@ void device_pm_remove(struct device *dev)
*/
void device_pm_move_before(struct device *deva, struct device *devb)
{
- pr_debug("PM: Moving %s:%s before %s:%s\n",
+ pr_debug("Moving %s:%s before %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert before devb. */
@@ -175,7 +199,7 @@ void device_pm_move_before(struct device *deva, struct device *devb)
*/
void device_pm_move_after(struct device *deva, struct device *devb)
{
- pr_debug("PM: Moving %s:%s after %s:%s\n",
+ pr_debug("Moving %s:%s after %s:%s\n",
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert after devb. */
@@ -188,7 +212,7 @@ void device_pm_move_after(struct device *deva, struct device *devb)
*/
void device_pm_move_last(struct device *dev)
{
- pr_debug("PM: Moving %s:%s to end of list\n",
+ pr_debug("Moving %s:%s to end of list\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
list_move_tail(&dev->power.entry, &dpm_list);
}
@@ -198,7 +222,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb)
if (!pm_print_times_enabled)
return 0;
- dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
+ dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
@@ -208,16 +232,13 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
void *cb, int error)
{
ktime_t rettime;
- s64 nsecs;
if (!pm_print_times_enabled)
return;
rettime = ktime_get();
- nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
-
- dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
- (unsigned long long)nsecs >> 10);
+ dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
+ (unsigned long long)ktime_us_delta(rettime, calltime));
}
/**
@@ -242,7 +263,7 @@ static int dpm_wait_fn(struct device *dev, void *async_ptr)
static void dpm_wait_for_children(struct device *dev, bool async)
{
- device_for_each_child(dev, &async, dpm_wait_fn);
+ device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
@@ -259,17 +280,46 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
}
-static void dpm_wait_for_superior(struct device *dev, bool async)
+static bool dpm_wait_for_superior(struct device *dev, bool async)
{
- dpm_wait(dev->parent, async);
+ struct device *parent;
+
+ /*
+ * If the device is resumed asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by reference
+ * counting the parent once more unless the device has been deleted
+ * already (in which case return right away).
+ */
+ mutex_lock(&dpm_list_mtx);
+
+ if (!device_pm_initialized(dev)) {
+ mutex_unlock(&dpm_list_mtx);
+ return false;
+ }
+
+ parent = get_device(dev->parent);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ dpm_wait(parent, async);
+ put_device(parent);
+
dpm_wait_for_suppliers(dev, async);
+
+ /*
+ * If the parent's callback has deleted the device, attempting to resume
+ * it would be invalid, so avoid doing that then.
+ */
+ return device_pm_initialized(dev);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
@@ -288,8 +338,9 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
@@ -319,12 +370,12 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
return ops->thaw;
- break;
case PM_EVENT_RESTORE:
return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
@@ -354,6 +405,7 @@ static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_late;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_late;
case PM_EVENT_THAW:
@@ -388,6 +440,7 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_noirq;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_noirq;
case PM_EVENT_THAW:
@@ -403,16 +456,16 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
- dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
+ dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
- ", may wakeup" : "");
+ ", may wakeup" : "", dev->power.driver_flags);
}
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
- printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
- dev_name(dev), pm_verb(state.event), info, error);
+ dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
+ error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -450,7 +503,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -462,14 +515,20 @@ struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
+ bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
struct dpm_watchdog wd
+static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
+module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
+MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
+ "Backtrace all CPUs on DPM watchdog timeout");
+
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The expired timer; it is embedded in the dpm_watchdog being handled.
*
* Called when a driver has timed out suspending or resuming.
* There's not much we can do here to recover so panic() to
@@ -477,12 +536,28 @@ struct dpm_watchdog {
*/
static void dpm_watchdog_handler(struct timer_list *t)
{
- struct dpm_watchdog *wd = from_timer(wd, t, timer);
+ struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
+ struct timer_list *timer = &wd->timer;
+ unsigned int time_left;
+
+ if (wd->fatal) {
+ unsigned int this_cpu = smp_processor_id();
+
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ if (dpm_watchdog_all_cpu_backtrace)
+ trigger_allbutcpu_cpu_backtrace(this_cpu);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+ }
- dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL);
- panic("%s %s: unrecoverable failure\n",
- dev_driver_string(wd->dev), dev_name(wd->dev));
+ time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
+ dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
+ CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
+ show_stack(wd->tsk, NULL, KERN_WARNING);
+
+ wd->fatal = true;
+ mod_timer(timer, jiffies + HZ * time_left);
}
/**
@@ -496,10 +571,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
+ wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
- timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
@@ -511,8 +587,8 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
struct timer_list *timer = &wd->timer;
- del_timer_sync(timer);
- destroy_timer_on_stack(timer);
+ timer_delete_sync(timer);
+ timer_destroy_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
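With the new CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, the watchdog now expires
twice: the first expiry only warns and re-arms the timer for the remaining
time, and the second one panics (when both timeouts are configured equal,
wd->fatal starts out true and the first expiry panics directly). A stand-alone
model of that two-stage logic, with illustrative timeout values:

	#include <stdbool.h>
	#include <stdio.h>

	#define WARNING_TIMEOUT	60	/* illustrative, in seconds */
	#define TOTAL_TIMEOUT	120

	struct watchdog { bool fatal; };

	/* Model of dpm_watchdog_handler(): warn once, then re-arm to panic. */
	static unsigned int expire(struct watchdog *wd)
	{
		if (wd->fatal) {
			printf("panic: unrecoverable failure\n");
			return 0;
		}
		wd->fatal = true;
		printf("warning after %u s; %u s until panic\n",
		       WARNING_TIMEOUT, TOTAL_TIMEOUT - WARNING_TIMEOUT);
		return TOTAL_TIMEOUT - WARNING_TIMEOUT; /* delay to next expiry */
	}

	int main(void)
	{
		/* fatal starts true when the two timeouts are configured equal. */
		struct watchdog wd = { .fatal = WARNING_TIMEOUT == TOTAL_TIMEOUT };

		while (expire(&wd))
			;	/* stands in for the mod_timer() re-arm */
		return 0;
	}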
@@ -523,86 +599,121 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
- * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
+ * dev_pm_skip_resume - System-wide device resume optimization check.
* @dev: Target device.
*
- * Make the core skip the "early resume" and "resume" phases for @dev.
- *
- * This function can be called by middle-layer code during the "noirq" phase of
- * system resume if necessary, but not by device drivers.
+ * Return:
+ * - %false if the transition under way is RESTORE.
+ * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
+ * - The logical negation of %power.must_resume otherwise (that is, when the
+ * transition under way is RESUME).
*/
-void dev_pm_skip_next_resume_phases(struct device *dev)
+bool dev_pm_skip_resume(struct device *dev)
{
- dev->power.is_late_suspended = false;
- dev->power.is_suspended = false;
+ if (pm_transition.event == PM_EVENT_RESTORE)
+ return false;
+
+ if (pm_transition.event == PM_EVENT_THAW)
+ return dev_pm_skip_suspend(dev);
+
+ return !dev->power.must_resume;
}
-/**
- * suspend_event - Return a "suspend" message for given "resume" one.
- * @resume_msg: PM message representing a system-wide resume transition.
- */
-static pm_message_t suspend_event(pm_message_t resume_msg)
+static bool is_async(struct device *dev)
{
- switch (resume_msg.event) {
- case PM_EVENT_RESUME:
- return PMSG_SUSPEND;
- case PM_EVENT_THAW:
- case PM_EVENT_RESTORE:
- return PMSG_FREEZE;
- case PM_EVENT_RECOVER:
- return PMSG_HIBERNATE;
- }
- return PMSG_ON;
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
}
-/**
- * dev_pm_may_skip_resume - System-wide device resume optimization check.
- * @dev: Target device.
- *
- * Checks whether or not the device may be left in suspend after a system-wide
- * transition to the working state.
- */
-bool dev_pm_may_skip_resume(struct device *dev)
+static bool __dpm_async(struct device *dev, async_func_t func)
{
- return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+ if (dev->power.work_in_progress)
+ return true;
+
+ if (!is_async(dev))
+ return false;
+
+ dev->power.work_in_progress = true;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
+
+ put_device(dev);
+
+ return false;
}
-static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
+static bool dpm_async_fn(struct device *dev, async_func_t func)
{
- pm_callback_t callback;
- const char *info;
+ guard(mutex)(&async_wip_mtx);
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
+ return __dpm_async(dev, func);
+}
+
+static int dpm_async_with_cleanup(struct device *dev, void *fn)
+{
+ guard(mutex)(&async_wip_mtx);
- if (info_p)
- *info_p = info;
+ if (!__dpm_async(dev, fn))
+ dev->power.work_in_progress = false;
- return callback;
+ return 0;
}
-static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p);
+static void dpm_async_resume_children(struct device *dev, async_func_t func)
+{
+ /*
+ * Prevent racing with dpm_clear_async_state() during initial list
+ * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
+ * dpm_resume().
+ */
+ guard(mutex)(&dpm_list_mtx);
-static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
- pm_message_t state,
- const char **info_p);
+ /*
+ * Start processing "async" children of the device unless it's been
+ * started already for them.
+ */
+ device_for_each_child(dev, func, dpm_async_with_cleanup);
+}
+
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ dpm_async_resume_children(dev, func);
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" consumers. */
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->consumer, func);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_clear_async_state(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+ dev->power.work_in_progress = false;
+}
+
+static bool dpm_root_device(struct device *dev)
+{
+ lockdep_assert_held(&dpm_list_mtx);
+
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * consumers is actually empty before calling it.
+ */
+ return !dev->parent && list_empty(&dev->links.suppliers);
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie);
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
@@ -613,10 +724,10 @@ static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
bool skip_resume;
int error = 0;
@@ -626,42 +737,59 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend has been aborted in the noirq
+ * phase before invoking the noirq suspend callback for the
+		 * device, so if device_suspend_late() has left it in suspend,
+		 * device_resume_early() should also leave it in suspend, in
+		 * case its early resume depends on the noirq resume that has
+		 * not run.
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
- skip_resume = dev_pm_may_skip_resume(dev);
+ skip_resume = dev_pm_skip_resume(dev);
+ /*
+ * If the driver callback is skipped below or by the middle layer
+ * callback and device_resume_early() also skips the driver callback for
+ * this device later, it needs to appear as "suspended" to PM-runtime,
+ * so change its status accordingly.
+ *
+ * Otherwise, the device is going to be resumed, so set its PM-runtime
+ * status to "active" unless its power.smart_suspend flag is clear, in
+ * which case it is not necessary to update its PM-runtime status.
+ */
+ if (skip_resume)
+ pm_runtime_set_suspended(dev);
+ else if (dev_pm_smart_suspend(dev))
+ pm_runtime_set_active(dev);
- callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
if (skip_resume)
goto Skip;
- if (dev_pm_smart_suspend_and_suspended(dev)) {
- pm_message_t suspend_msg = suspend_event(state);
-
- /*
- * If "freeze" callbacks have been skipped during a transition
- * related to hibernation, the subsequent "thaw" callbacks must
- * be skipped too or bad things may happen. Otherwise, resume
- * callbacks are going to be run for the device, so its runtime
- * PM status must be changed to reflect the new state after the
- * transition under way.
- */
- if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
- !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
- if (state.event == PM_EVENT_THAW) {
- skip_resume = true;
- goto Skip;
- } else {
- pm_runtime_set_active(dev);
- }
- }
- }
-
if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
@@ -673,96 +801,72 @@ Run:
Skip:
dev->power.is_noirq_suspended = false;
- if (skip_resume) {
- /*
- * The device is going to be left in suspend, but it might not
- * have been in runtime suspend before the system suspended, so
- * its runtime PM status needs to be updated to avoid confusing
- * the runtime PM framework when runtime PM is enabled for the
- * device again.
- */
- pm_runtime_set_suspended(dev);
- dev_pm_skip_next_resume_phases(dev);
- }
-
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
- return error;
-}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
+ if (error) {
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
+ }
+
+ dpm_async_resume_subordinate(dev, async_resume_noirq);
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
- int error;
-
- error = device_resume_noirq(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ struct device *dev = data;
+ device_resume_noirq(dev, pm_transition, true);
put_device(dev);
}
-void dpm_noirq_resume_devices(pm_message_t state)
+static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
- * Advanced the async threads upfront,
- * in case the starting of async threads is
- * delayed by non-async resuming devices.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_resume_noirq, dev);
- }
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_noirq);
}
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
- get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- mutex_unlock(&dpm_list_mtx);
- if (!is_async(dev)) {
- int error;
+ if (!dpm_async_fn(dev, async_resume_noirq)) {
+ get_device(dev);
- error = device_resume_noirq(dev, state, false);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
- }
- }
+ mutex_unlock(&dpm_list_mtx);
- mutex_lock(&dpm_list_mtx);
- put_device(dev);
+ device_resume_noirq(dev, state, false);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ }
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
- trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
-}
+ if (READ_ONCE(async_error))
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-void dpm_noirq_end(void)
-{
- resume_device_irqs();
- device_wakeup_disarm_wake_irqs();
- cpuidle_resume();
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
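Resume now fans out along the dependency graph instead of queuing every async
device upfront: the list walk only primes devices with no parent and no
suppliers, and each device that finishes resuming schedules its children and
consumers via dpm_async_resume_subordinate(). A toy single-threaded model of
that cascade (the real code runs each step as an async work item and also
follows device links):

	#include <stdio.h>

	#define NDEV 4

	/* parent[i] is the index of device i's parent, or -1 for a root. */
	static const int parent[NDEV] = { -1, 0, 0, 2 };
	static int resumed[NDEV];

	static void resume(int dev)
	{
		resumed[dev] = 1;
		printf("resume device %d\n", dev);

		/* Kick off subordinates, as dpm_async_resume_subordinate() does. */
		for (int i = 0; i < NDEV; i++)
			if (parent[i] == dev && !resumed[i])
				resume(i);
	}

	int main(void)
	{
		/* Start processing root devices upfront, as dpm_resume() does. */
		for (int i = 0; i < NDEV; i++)
			if (parent[i] < 0)
				resume(i);
		return 0;
	}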
/**
@@ -775,38 +879,13 @@ void dpm_noirq_end(void)
void dpm_resume_noirq(pm_message_t state)
{
dpm_noirq_resume_devices(state);
- dpm_noirq_end();
-}
-static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "early power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "early type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "early class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "early bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
+ resume_device_irqs();
+ device_wakeup_disarm_wake_irqs();
}
+static void async_resume_early(void *data, async_cookie_t cookie);
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -815,50 +894,77 @@ static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Out;
if (!dev->power.is_late_suspended)
goto Out;
- dpm_wait_for_superior(dev, async);
+ if (dev->power.syscore)
+ goto Skip;
- callback = dpm_subsys_resume_early_cb(dev, state, &info);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (dev_pm_skip_resume(dev))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "early driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
dev->power.is_late_suspended = false;
+ pm_runtime_enable(dev);
- Out:
+Out:
TRACE_RESUME(error);
- pm_runtime_enable(dev);
complete_all(&dev->power.completion);
- return error;
+
+ if (error) {
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async early" : " early", error);
+ }
+
+ dpm_async_resume_subordinate(dev, async_resume_early);
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
- int error;
-
- error = device_resume_early(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ struct device *dev = data;
+ device_resume_early(dev, pm_transition, true);
put_device(dev);
}
@@ -872,45 +978,44 @@ void dpm_resume_early(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
+ async_error = 0;
pm_transition = state;
+ mutex_lock(&dpm_list_mtx);
+
/*
- * Advanced the async threads upfront,
- * in case the starting of async threads is
- * delayed by non-async resuming devices.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_resume_early, dev);
- }
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_early);
}
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
- get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- mutex_unlock(&dpm_list_mtx);
- if (!is_async(dev)) {
- int error;
+ if (!dpm_async_fn(dev, async_resume_early)) {
+ get_device(dev);
- error = device_resume_early(dev, state, false);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
+ mutex_unlock(&dpm_list_mtx);
+
+ device_resume_early(dev, state, false);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
}
- mutex_lock(&dpm_list_mtx);
- put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
+ if (READ_ONCE(async_error))
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@@ -925,13 +1030,15 @@ void dpm_resume_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
+static void async_resume(void *data, async_cookie_t cookie);
+
/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*/
-static int device_resume(struct device *dev, pm_message_t state, bool async)
+static void device_resume(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -944,13 +1051,27 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ if (!dev->power.is_suspended)
+ goto Complete;
+
+ dev->power.is_suspended = false;
+
if (dev->power.direct_complete) {
- /* Match the pm_runtime_disable() in __device_suspend(). */
+ /*
+ * Allow new children to be added under the device after this
+ * point if it has no PM callbacks.
+ */
+ if (dev->power.no_pm_callbacks)
+ dev->power.is_prepared = false;
+
+ /* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
- dpm_wait_for_superior(dev, async);
+ if (!dpm_wait_for_superior(dev, async))
+ goto Complete;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -960,9 +1081,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
*/
dev->power.is_prepared = false;
- if (!dev->power.is_suspended)
- goto Unlock;
-
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@@ -1000,9 +1118,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
End:
error = dpm_run_callback(callback, dev, state, info);
- dev->power.is_suspended = false;
- Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);
@@ -1011,17 +1127,20 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
- return error;
+ if (error) {
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
+
+ dpm_async_resume_subordinate(dev, async_resume);
}
static void async_resume(void *data, async_cookie_t cookie)
{
- struct device *dev = (struct device *)data;
- int error;
+ struct device *dev = data;
- error = device_resume(dev, pm_transition, true);
- if (error)
- pm_dev_err(dev, pm_transition, " async", error);
+ device_resume(dev, pm_transition, true);
put_device(dev);
}
@@ -1038,45 +1157,43 @@ void dpm_resume(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
- might_sleep();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
+ /*
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
+ */
list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_resume, dev);
- }
+ dpm_clear_async_state(dev);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume);
}
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
- get_device(dev);
- if (!is_async(dev)) {
- int error;
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
+ if (!dpm_async_fn(dev, async_resume)) {
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_resume(dev, state, false);
- if (error) {
- suspend_stats.failed_resume++;
- dpm_save_failed_step(SUSPEND_RESUME);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, "", error);
- }
+ device_resume(dev, state, false);
+
+ put_device(dev);
mutex_lock(&dpm_list_mtx);
}
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &dpm_prepared_list);
- put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
+ if (READ_ONCE(async_error))
+ dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
devfreq_resume();
@@ -1094,7 +1211,7 @@ static void device_complete(struct device *dev, pm_message_t state)
const char *info = NULL;
if (dev->power.syscore)
- return;
+ goto out;
device_lock(dev);
@@ -1124,6 +1241,9 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
+out:
+ /* If enabling runtime PM for the device is blocked, unblock it. */
+ pm_runtime_unblock(dev);
pm_runtime_put(dev);
}
@@ -1139,7 +1259,6 @@ void dpm_complete(pm_message_t state)
struct list_head list;
trace_suspend_resume(TPS("dpm_complete"), state.event, true);
- might_sleep();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
@@ -1149,14 +1268,16 @@ void dpm_complete(pm_message_t state)
get_device(dev);
dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
+
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
device_complete(dev, state);
trace_device_pm_callback_end(dev, 0);
- mutex_lock(&dpm_list_mtx);
put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
@@ -1176,6 +1297,7 @@ void dpm_complete(pm_message_t state)
void dpm_resume_end(pm_message_t state)
{
dpm_resume(state);
+ pm_restore_gfp_mask();
dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
@@ -1183,6 +1305,82 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
+static bool dpm_leaf_device(struct device *dev)
+{
+ struct device *child;
+
+ lockdep_assert_held(&dpm_list_mtx);
+
+ child = device_find_any_child(dev);
+ if (child) {
+ put_device(child);
+
+ return false;
+ }
+
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * consumers is actually empty before calling it.
+ */
+ return list_empty(&dev->links.consumers);
+}
+
+static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
+{
+ guard(mutex)(&dpm_list_mtx);
+
+ /*
+ * If the device is suspended asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by checking
+ * if the device has been deleted already as the parent cannot be
+ * deleted before it.
+ */
+ if (!device_pm_initialized(dev))
+ return false;
+
+ /* Start processing the device's parent if it is "async". */
+ if (dev->parent)
+ dpm_async_with_cleanup(dev->parent, func);
+
+ return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ if (!dpm_async_suspend_parent(dev, func))
+ return;
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" suppliers. */
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->supplier, func);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_async_suspend_complete_all(struct list_head *device_list)
+{
+ struct device *dev;
+
+ guard(mutex)(&async_wip_mtx);
+
+ list_for_each_entry_reverse(dev, device_list, power.entry) {
+ /*
+ * In case the device is being waited for and async processing
+ * has not started for it yet, let the waiters make progress.
+ */
+ if (!dev->power.work_in_progress)
+ complete_all(&dev->power.completion);
+ }
+}
+
/**
* resume_event - Return a "resume" message for given "suspend" sleep state.
* @sleep_state: PM message representing a sleep state.
@@ -1214,69 +1412,16 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
}
-static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
-}
-
-static bool device_must_resume(struct device *dev, pm_message_t state,
- bool no_subsys_suspend_noirq)
-{
- pm_message_t resume_msg = resume_event(state);
-
- /*
- * If all of the device driver's "noirq", "late" and "early" callbacks
- * are invoked directly by the core, the decision to allow the device to
- * stay in suspend can be based on its current runtime PM status and its
- * wakeup settings.
- */
- if (no_subsys_suspend_noirq &&
- !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
- !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
- !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
- return !pm_runtime_status_suspended(dev) &&
- (resume_msg.event != PM_EVENT_RESUME ||
- (device_can_wakeup(dev) && !device_may_wakeup(dev)));
-
- /*
- * The only safe strategy here is to require that if the device may not
- * be left in suspend, resume callbacks must be invoked for it.
- */
- return !dev->power.may_skip_resume;
-}
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
/**
- * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
+ * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1284,11 +1429,10 @@ static bool device_must_resume(struct device *dev, pm_message_t state,
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
- bool no_subsys_cb = false;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
@@ -1296,24 +1440,29 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
- if (pm_wakeup_pending()) {
- async_error = -EBUSY;
- goto Complete;
- }
-
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
- no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
-
- if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+ if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
@@ -1324,20 +1473,24 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
}
Skip:
dev->power.is_noirq_suspended = true;
- if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
- dev->power.must_resume = dev->power.must_resume ||
- atomic_read(&dev->power.usage_count) > 1 ||
- device_must_resume(dev, state, no_subsys_cb);
- } else {
+ /*
+ * Devices must be resumed unless they are explicitly allowed to be left
+ * in suspend, but even in that case skipping the resume of devices that
+ * were in use right before the system suspend (as indicated by their
+ * runtime PM usage counters and child counters) would be suboptimal.
+ */
+ if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
- }
if (dev->power.must_resume)
dpm_superior_set_must_resume(dev);
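A device is now left suspended only if its driver opted in with
DPM_FLAG_MAY_SKIP_RESUME, the middle layer left power.may_skip_resume set, and
pm_runtime_need_not_resume() confirms it was not in use before the transition.
A sketch of the driver-side opt-in, assuming a hypothetical foo_probe() (the
flags and helper are the real ones):

	static int foo_probe(struct device *dev)
	{
		/*
		 * Let the core skip this device's resume callbacks when it
		 * was already runtime-suspended before the system transition.
		 */
		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
					     DPM_FLAG_MAY_SKIP_RESUME);
		return 0;
	}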
@@ -1345,83 +1498,81 @@ Skip:
Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
-}
-
-static void async_suspend_noirq(void *data, async_cookie_t cookie)
-{
- struct device *dev = (struct device *)data;
- int error;
- error = __device_suspend_noirq(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ if (error || READ_ONCE(async_error))
+ return;
- put_device(dev);
+ dpm_async_suspend_superior(dev, async_suspend_noirq);
}
-static int device_suspend_noirq(struct device *dev)
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
- reinit_completion(&dev->power.completion);
+ struct device *dev = data;
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_suspend_noirq, dev);
- return 0;
- }
- return __device_suspend_noirq(dev, pm_transition, false);
-}
-
-void dpm_noirq_begin(void)
-{
- cpuidle_pause();
- device_wakeup_arm_wake_irqs();
- suspend_device_irqs();
+ device_suspend_noirq(dev, pm_transition, true);
+ put_device(dev);
}
-int dpm_noirq_suspend_devices(pm_message_t state)
+static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
+ mutex_lock(&dpm_list_mtx);
+
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_noirq);
+ }
+
while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.prev);
+ dev = to_device(dpm_late_early_list.prev);
+
+ list_move(&dev->power.entry, &dpm_noirq_list);
+
+ if (dpm_async_fn(dev, async_suspend_noirq))
+ continue;
get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev);
+ device_suspend_noirq(dev, state, false);
- mutex_lock(&dpm_list_mtx);
- if (error) {
- pm_dev_err(dev, state, " noirq", error);
- dpm_save_failed_dev(dev_name(dev));
- put_device(dev);
- break;
- }
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
- if (async_error)
+ mutex_lock(&dpm_list_mtx);
+
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_late_early_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
break;
+ }
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
- if (!error)
- error = async_error;
- if (error) {
- suspend_stats.failed_suspend_noirq++;
+ error = READ_ONCE(async_error);
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
- }
+
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
@@ -1438,7 +1589,9 @@ int dpm_suspend_noirq(pm_message_t state)
{
int ret;
- dpm_noirq_begin();
+ device_wakeup_arm_wake_irqs();
+ suspend_device_irqs();
+
ret = dpm_noirq_suspend_devices(state);
if (ret)
dpm_resume_noirq(resume_event(state));
@@ -1455,79 +1608,70 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
spin_lock_irq(&parent->power.lock);
- if (dev->power.wakeup_path && !parent->power.ignore_children)
+ if (device_wakeup_path(dev) && !parent->power.ignore_children)
parent->power.wakeup_path = true;
spin_unlock_irq(&parent->power.lock);
}
-static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
- pm_message_t state,
- const char **info_p)
-{
- pm_callback_t callback;
- const char *info;
-
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "late type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "late class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "late bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- } else {
- return NULL;
- }
-
- if (info_p)
- *info_p = info;
-
- return callback;
-}
+static void async_suspend_late(void *data, async_cookie_t cookie);
/**
- * __device_suspend_late - Execute a "late suspend" callback for given device.
+ * device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback;
- const char *info;
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- __pm_runtime_disable(dev, false);
-
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
if (pm_wakeup_pending()) {
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Complete;
- callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+ /*
+ * Disable runtime PM for the device without checking if there is a
+ * pending resume request for it.
+ */
+ __pm_runtime_disable(dev, false);
+
+ if (dev->power.syscore)
+ goto Skip;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
if (callback)
goto Run;
- if (dev_pm_smart_suspend_and_suspended(dev) &&
- !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+ if (dev_pm_skip_suspend(dev))
goto Skip;
if (dev->driver && dev->driver->pm) {
@@ -1538,7 +1682,10 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async late" : " late", error);
+ pm_runtime_enable(dev);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@@ -1549,33 +1696,19 @@ Skip:
Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
- return error;
-}
-static void async_suspend_late(void *data, async_cookie_t cookie)
-{
- struct device *dev = (struct device *)data;
- int error;
+ if (error || READ_ONCE(async_error))
+ return;
- error = __device_suspend_late(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
- put_device(dev);
+ dpm_async_suspend_superior(dev, async_suspend_late);
}
-static int device_suspend_late(struct device *dev)
+static void async_suspend_late(void *data, async_cookie_t cookie)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_suspend_late, dev);
- return 0;
- }
+ struct device *dev = data;
- return __device_suspend_late(dev, pm_transition, false);
+ device_suspend_late(dev, pm_transition, true);
+ put_device(dev);
}
/**
@@ -1585,42 +1718,63 @@ static int device_suspend_late(struct device *dev)
int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
- mutex_lock(&dpm_list_mtx);
+
pm_transition = state;
async_error = 0;
+ wake_up_all_idle_cpus();
+
+ mutex_lock(&dpm_list_mtx);
+
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_late);
+ }
+
while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
+ dev = to_device(dpm_suspended_list.prev);
+
+ list_move(&dev->power.entry, &dpm_late_early_list);
+
+ if (dpm_async_fn(dev, async_suspend_late))
+ continue;
get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev);
+ device_suspend_late(dev, state, false);
+
+ put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_late_early_list);
- if (error) {
- pm_dev_err(dev, state, " late", error);
- dpm_save_failed_dev(dev_name(dev));
- put_device(dev);
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_suspended_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
break;
}
- put_device(dev);
-
- if (async_error)
- break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
- if (!error)
- error = async_error;
+
+ error = READ_ONCE(async_error);
if (error) {
- suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
}
@@ -1635,17 +1789,20 @@ int dpm_suspend_late(pm_message_t state)
*/
int dpm_suspend_end(pm_message_t state)
{
- int error = dpm_suspend_late(state);
+ ktime_t starttime = ktime_get();
+ int error;
+
+ error = dpm_suspend_late(state);
if (error)
- return error;
+ goto out;
error = dpm_suspend_noirq(state);
- if (error) {
+ if (error)
dpm_resume_early(resume_event(state));
- return error;
- }
- return 0;
+out:
+ dpm_show_time(starttime, state, error, "end");
+ return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
@@ -1668,7 +1825,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev, state);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -1688,7 +1845,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -1697,13 +1854,15 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
device_links_read_unlock(idx);
}
+static void async_suspend(void *data, async_cookie_t cookie);
+
/**
- * __device_suspend - Execute "suspend" callbacks for given device.
+ * device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
-static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+static void device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1715,42 +1874,53 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_subordinate(dev, async);
- if (async_error) {
+ if (READ_ONCE(async_error)) {
dev->power.direct_complete = false;
goto Complete;
}
/*
- * If a device configured to wake up the system from sleep states
- * has been suspended at run time and there's a resume request pending
- * for it, this is equivalent to the device signaling wakeup, so the
- * system suspend operation should be aborted.
+ * Wait for possible runtime PM transitions of the device in progress
+ * to complete and if there's a runtime resume request pending for it,
+ * resume it before proceeding with invoking the system-wide suspend
+ * callbacks for it.
+ *
+ * If the system-wide suspend callbacks below change the configuration
+ * of the device, they must disable runtime PM for it or otherwise
+ * ensure that its runtime-resume callbacks will not be confused by that
+ * change in case they are invoked going forward.
*/
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
+ pm_runtime_barrier(dev);
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
if (dev->power.syscore)
goto Complete;
+ /* Avoid direct_complete to let wakeup_path propagate. */
+ if (device_may_wakeup(dev) || device_wakeup_path(dev))
+ dev->power.direct_complete = false;
+
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev)) {
+ pm_dev_dbg(dev, state, "direct-complete ");
+ dev->power.is_suspended = true;
goto Complete;
+ }
pm_runtime_enable(dev);
}
dev->power.direct_complete = false;
}
- dev->power.may_skip_resume = false;
- dev->power.must_resume = false;
+ dev->power.may_skip_resume = true;
+ dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1807,39 +1977,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_watchdog_clear(&wd);
Complete:
- if (error)
- async_error = error;
+ if (error) {
+ WRITE_ONCE(async_error, error);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
-}
-
-static void async_suspend(void *data, async_cookie_t cookie)
-{
- struct device *dev = (struct device *)data;
- int error;
- error = __device_suspend(dev, pm_transition, true);
- if (error) {
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, pm_transition, " async", error);
- }
+ if (error || READ_ONCE(async_error))
+ return;
- put_device(dev);
+ dpm_async_suspend_superior(dev, async_suspend);
}
-static int device_suspend(struct device *dev)
+static void async_suspend(void *data, async_cookie_t cookie)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule(async_suspend, dev);
- return 0;
- }
+ struct device *dev = data;
- return __device_suspend(dev, pm_transition, false);
+ device_suspend(dev, pm_transition, true);
+ put_device(dev);
}
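A hedged illustration of the rule spelled out in the new comment above: if a system-wide suspend callback reconfigures the device, it should block runtime PM first so a later runtime-resume callback cannot run against the changed state. The foo_* names are hypothetical driver helpers, not part of this patch:

static int foo_suspend(struct device *dev)
{
	/* Keep runtime-resume callbacks from racing with the change. */
	pm_runtime_disable(dev);
	foo_enter_sleep_config(dev);	/* hypothetical driver helper */
	return 0;
}

static int foo_resume(struct device *dev)
{
	foo_restore_run_config(dev);	/* hypothetical driver helper */
	pm_runtime_enable(dev);		/* balance the disable in foo_suspend() */
	return 0;
}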
/**
@@ -1849,7 +2007,8 @@ static int device_suspend(struct device *dev)
int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
- int error = 0;
+ struct device *dev;
+ int error;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
@@ -1857,43 +2016,103 @@ int dpm_suspend(pm_message_t state)
devfreq_suspend();
cpufreq_suspend();
- mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
+
+ mutex_lock(&dpm_list_mtx);
+
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend);
+ }
+
while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
+ dev = to_device(dpm_prepared_list.prev);
+
+ list_move(&dev->power.entry, &dpm_suspended_list);
+
+ if (dpm_async_fn(dev, async_suspend))
+ continue;
get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
- error = device_suspend(dev);
+ device_suspend(dev, state, false);
+
+ put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error) {
- pm_dev_err(dev, state, "", error);
- dpm_save_failed_dev(dev_name(dev));
- put_device(dev);
+
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_prepared_list);
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
break;
}
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_suspended_list);
- put_device(dev);
- if (async_error)
- break;
}
+
mutex_unlock(&dpm_list_mtx);
+
async_synchronize_full();
- if (!error)
- error = async_error;
- if (error) {
- suspend_stats.failed_suspend++;
+
+ error = READ_ONCE(async_error);
+ if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
- }
+
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
+static bool device_prepare_smart_suspend(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = true;
+ int idx;
+
+ /*
+ * The "smart suspend" feature is enabled for devices whose drivers ask
+ * for it and for devices without PM callbacks.
+ *
+ * However, if "smart suspend" is not enabled for the device's parent
+ * or any of its suppliers that take runtime PM into account, it cannot
+ * be enabled for the device either.
+ */
+ if (!dev->power.no_pm_callbacks &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ return false;
+
+ if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
+ !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
+ return false;
+
+ idx = device_links_read_lock();
+
+ dev_for_each_link_to_supplier(link, dev) {
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (!dev_pm_smart_suspend(link->supplier) &&
+ !pm_runtime_blocked(link->supplier)) {
+ ret = false;
+ break;
+ }
+ }
+
+ device_links_read_unlock(idx);
+
+ return ret;
+}
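As a sketch of the driver-side opt-in that device_prepare_smart_suspend() checks for, a driver would typically set the flag at probe time. foo_probe() is hypothetical; dev_pm_set_driver_flags() is the stock helper for setting driver PM flags:

static int foo_probe(struct device *dev)
{
	/* Ask the core to apply "smart suspend" to this device. */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
	pm_runtime_enable(dev);
	return 0;
}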
+
/**
* device_prepare - Prepare a device for system power transition.
* @dev: Device to handle.
@@ -1905,15 +2124,9 @@ int dpm_suspend(pm_message_t state)
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
+ bool smart_suspend;
int ret = 0;
- if (dev->power.syscore)
- return 0;
-
- WARN_ON(!pm_runtime_enabled(dev) &&
- dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED));
-
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
@@ -1921,10 +2134,21 @@ static int device_prepare(struct device *dev, pm_message_t state)
* it again during the complete phase.
*/
pm_runtime_get_noresume(dev);
+ /*
+ * If runtime PM is disabled for the device at this point and it has
+ * never been enabled so far, it should not be enabled until this system
+ * suspend-resume cycle is complete, so prepare to trigger a warning on
+ * subsequent attempts to enable it.
+ */
+ smart_suspend = !pm_runtime_block_if_disabled(dev);
+
+ if (dev->power.syscore)
+ return 0;
device_lock(dev);
dev->power.wakeup_path = false;
+ dev->power.out_band_wakeup = false;
if (dev->power.no_pm_callbacks)
goto unlock;
@@ -1948,10 +2172,17 @@ unlock:
device_unlock(dev);
if (ret < 0) {
- suspend_report_result(callback, ret);
+ suspend_report_result(dev, callback, ret);
pm_runtime_put(dev);
return ret;
}
+ /* Do not enable "smart suspend" for devices with disabled runtime PM. */
+ if (smart_suspend)
+ smart_suspend = device_prepare_smart_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.smart_suspend = smart_suspend;
/*
* A positive return value from ->prepare() means "this device appears
* to be runtime-suspended and its state is fine, so if it really is
@@ -1959,12 +2190,12 @@ unlock:
* will do the same thing with all of its descendants". This only
* applies to suspend transitions, however.
*/
- spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
- ((pm_runtime_suspended(dev) && ret > 0) ||
- dev->power.no_pm_callbacks) &&
- !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
+ (ret > 0 || dev->power.no_pm_callbacks) &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+
spin_unlock_irq(&dev->power.lock);
+
return 0;
}
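A minimal sketch of a ->prepare() callback that opts into the direct-complete optimization described above by returning a positive value when the device is already runtime-suspended (foo_prepare() is hypothetical):

static int foo_prepare(struct device *dev)
{
	/* Positive return: the device may stay suspended over the transition. */
	return pm_runtime_suspended(dev);
}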
@@ -1979,7 +2210,6 @@ int dpm_prepare(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
- might_sleep();
/*
* Give a chance for the known devices to complete their probes, before
@@ -1996,10 +2226,11 @@ int dpm_prepare(pm_message_t state)
device_block_probing();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_list)) {
+ while (!list_empty(&dpm_list) && !error) {
struct device *dev = to_device(dpm_list.next);
get_device(dev);
+
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
@@ -2007,22 +2238,23 @@ int dpm_prepare(pm_message_t state)
trace_device_pm_callback_end(dev, error);
mutex_lock(&dpm_list_mtx);
- if (error) {
- if (error == -EAGAIN) {
- put_device(dev);
- error = 0;
- continue;
- }
- printk(KERN_INFO "PM: Device %s not prepared "
- "for power transition: code %d\n",
- dev_name(dev), error);
- put_device(dev);
- break;
+
+ if (!error) {
+ dev->power.is_prepared = true;
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+ } else if (error == -EAGAIN) {
+ error = 0;
+ } else {
+ dev_info(dev, "not prepared for power transition: code %d\n",
+ error);
}
- dev->power.is_prepared = true;
- if (!list_empty(&dev->power.entry))
- list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
+ mutex_unlock(&dpm_list_mtx);
+
put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
@@ -2038,29 +2270,33 @@ int dpm_prepare(pm_message_t state)
*/
int dpm_suspend_start(pm_message_t state)
{
+ ktime_t starttime = ktime_get();
int error;
error = dpm_prepare(state);
- if (error) {
- suspend_stats.failed_prepare++;
+ if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
- } else
+ else {
+ pm_restrict_gfp_mask();
error = dpm_suspend(state);
+ }
+
+ dpm_show_time(starttime, state, error, "start");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
-void __suspend_report_result(const char *function, void *fn, int ret)
+void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
* device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
* @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
*/
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
@@ -2108,7 +2344,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
void device_pm_check_callbacks(struct device *dev)
{
- spin_lock_irq(&dev->power.lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
@@ -2117,11 +2355,10 @@ void device_pm_check_callbacks(struct device *dev)
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
- spin_unlock_irq(&dev->power.lock);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
}
-bool dev_pm_smart_suspend_and_suspended(struct device *dev)
+bool dev_pm_skip_suspend(struct device *dev)
{
- return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- pm_runtime_status_suspended(dev);
+ return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
}
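dev_pm_skip_suspend() is intended for bus-level late/noirq callbacks; a hedged sketch of the call pattern (foo_suspend_late() is hypothetical):

static int foo_suspend_late(struct device *dev)
{
	/* Runtime-suspended with "smart suspend" set: nothing left to do. */
	if (dev_pm_skip_suspend(dev))
		return 0;

	return pm_generic_suspend_late(dev);
}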
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c511def48b48..922ed457db19 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -21,11 +21,15 @@ static inline void pm_runtime_early_init(struct device *dev)
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
+extern u64 pm_runtime_active_time(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
+#define WAKE_IRQ_DEDICATED_REVERSE BIT(2)
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
- WAKE_IRQ_DEDICATED_MANAGED)
+ WAKE_IRQ_DEDICATED_MANAGED | \
+ WAKE_IRQ_DEDICATED_REVERSE)
+#define WAKE_IRQ_DEDICATED_ENABLED BIT(3)
struct wake_irq {
struct device *dev;
@@ -38,7 +42,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status);
-extern void dev_pm_disable_wake_irq_check(struct device *dev);
+extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
+extern void dev_pm_enable_wake_irq_complete(struct device *dev);
#ifdef CONFIG_PM_SLEEP
@@ -73,6 +78,7 @@ extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
+extern int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
#else /* CONFIG_PM */
@@ -87,6 +93,8 @@ static inline void pm_runtime_remove(struct device *dev) {}
static inline int dpm_sysfs_add(struct device *dev) { return 0; }
static inline void dpm_sysfs_remove(struct device *dev) {}
+static inline int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid) { return 0; }
#endif
@@ -116,6 +124,13 @@ static inline bool device_pm_initialized(struct device *dev)
return dev->power.in_dpm_list;
}
+/* drivers/base/power/wakeup_stats.c */
+extern int wakeup_source_sysfs_add(struct device *parent,
+ struct wakeup_source *ws);
+extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
+
+extern int pm_wakeup_source_sysfs_add(struct device *parent);
+
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@@ -140,6 +155,11 @@ static inline bool device_pm_initialized(struct device *dev)
return device_is_registered(dev);
}
+static inline int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ return 0;
+}
+
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)
diff --git a/drivers/base/power/qos-test.c b/drivers/base/power/qos-test.c
new file mode 100644
index 000000000000..79fc6c4418da
--- /dev/null
+++ b/drivers/base/power/qos-test.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+#include <kunit/test.h>
+#include <linux/pm_qos.h>
+
+/* Basic test for aggregating two "min" requests */
+static void freq_qos_test_min(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req1, req2;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req1, 0, sizeof(req1));
+ memset(&req2, 0, sizeof(req2));
+
+ ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+
+ ret = freq_qos_remove_request(&req2);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+ ret = freq_qos_remove_request(&req1);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+}
+
+/* Test that requests for MAX_DEFAULT_VALUE have no effect */
+static void freq_qos_test_maxdef(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req1, req2;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req1, 0, sizeof(req1));
+ memset(&req2, 0, sizeof(req2));
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX),
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+
+ ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* Add max 1000 */
+ ret = freq_qos_update_request(&req1, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+ /* Add max 2000, no impact */
+ ret = freq_qos_update_request(&req2, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+ /* Remove max 1000, new max 2000 */
+ ret = freq_qos_remove_request(&req1);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 2000);
+}
+
+/*
+ * Test that a freq_qos_request can be added again after removal
+ *
+ * This issue was solved by commit 05ff1ba412fd ("PM: QoS: Invalidate frequency
+ * QoS requests after removal")
+ */
+static void freq_qos_test_readd(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req, 0, sizeof(req));
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ /* Add */
+ ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+ /* Remove */
+ ret = freq_qos_remove_request(&req);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ /* Add again */
+ ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+}
+
+static struct kunit_case pm_qos_test_cases[] = {
+ KUNIT_CASE(freq_qos_test_min),
+ KUNIT_CASE(freq_qos_test_maxdef),
+ KUNIT_CASE(freq_qos_test_readd),
+ {},
+};
+
+static struct kunit_suite pm_qos_test_module = {
+ .name = "qos-kunit-test",
+ .test_cases = pm_qos_test_cases,
+};
+kunit_test_suites(&pm_qos_test_module);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3382542b39b7..ff393cba7649 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Devices PM QoS constraints management
*
* Copyright (C) 2011 Texas Instruments, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *
* This module exposes the interface to kernel space for specifying
* per-device PM QoS dependencies. It provides infrastructure for registration
* of:
@@ -22,7 +18,7 @@
* per-device constraint data struct.
*
* Note about the per-device constraint data struct allocation:
- * . The per-device constraints data struct ptr is tored into the device
+ * . The per-device constraints data struct ptr is stored into the device
* dev_pm_info.
* . To minimize the data usage by the per-device constraints, the data struct
* is only allocated at the first call to dev_pm_qos_add_request.
@@ -94,33 +90,54 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
/**
- * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
* @dev: Device to get the PM QoS constraint value for.
*
* This routine must be called with dev->power.lock held.
*/
-s32 __dev_pm_qos_read_value(struct device *dev)
+s32 __dev_pm_qos_resume_latency(struct device *dev)
{
lockdep_assert_held(&dev->power.lock);
- return dev_pm_qos_raw_read_value(dev);
+ return dev_pm_qos_raw_resume_latency(dev);
}
/**
* dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
* @dev: Device to get the PM QoS constraint value for.
+ * @type: QoS request type.
*/
-s32 dev_pm_qos_read_value(struct device *dev)
+s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
+ struct dev_pm_qos *qos = dev->power.qos;
unsigned long flags;
s32 ret;
spin_lock_irqsave(&dev->power.lock, flags);
- ret = __dev_pm_qos_read_value(dev);
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ : pm_qos_read_value(&qos->resume_latency);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
+ : freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
+ : freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
+ break;
+ default:
+ WARN_ON(1);
+ ret = 0;
+ }
+
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
/**
* apply_constraint - Add/modify/remove device PM QoS request.
@@ -153,6 +170,10 @@ static int apply_constraint(struct dev_pm_qos_request *req,
req->dev->power.set_latency_tolerance(req->dev, value);
}
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_apply(&req->data.freq, action, value);
+ break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
@@ -181,12 +202,11 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
if (!qos)
return -ENOMEM;
- n = kzalloc(sizeof(*n), GFP_KERNEL);
+ n = kcalloc(3, sizeof(*n), GFP_KERNEL);
if (!n) {
kfree(qos);
return -ENOMEM;
}
- BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->resume_latency;
plist_head_init(&c->list);
@@ -195,6 +215,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
c->notifiers = n;
+ BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->latency_tolerance;
plist_head_init(&c->list);
@@ -203,6 +224,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
+ freq_constraints_init(&qos->freq);
+
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
@@ -256,11 +279,27 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+
c = &qos->latency_tolerance;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+
+ c = &qos->freq.min_freq;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ c = &qos->freq.max_freq;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -306,11 +345,22 @@ static int __dev_pm_qos_add_request(struct device *dev,
ret = dev_pm_qos_constraints_allocate(dev);
trace_dev_pm_qos_add_request(dev_name(dev), type, value);
- if (!ret) {
- req->dev = dev;
- req->type = type;
+ if (ret)
+ return ret;
+
+ req->dev = dev;
+ req->type = type;
+ if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
+ ret = freq_qos_add_request(&dev->power.qos->freq,
+ &req->data.freq,
+ FREQ_QOS_MIN, value);
+ else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
+ ret = freq_qos_add_request(&dev->power.qos->freq,
+ &req->data.freq,
+ FREQ_QOS_MAX, value);
+ else
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
- }
+
return ret;
}
@@ -374,6 +424,10 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
case DEV_PM_QOS_LATENCY_TOLERANCE:
curr_value = req->data.pnode.prio;
break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ curr_value = req->data.freq.pnode.prio;
+ break;
case DEV_PM_QOS_FLAGS:
curr_value = req->data.flr.flags;
break;
@@ -471,6 +525,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
*
* @dev: target device for the constraint
* @notifier: notifier block managed by caller.
+ * @type: request type.
*
* Will register the notifier into a notification chain that gets called
* upon changes to the target value for the device.
@@ -478,7 +533,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
* If the device's constraints object doesn't exist when this routine is called,
* it will be created (or error code will be returned if that fails).
*/
-int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{
int ret = 0;
@@ -489,10 +545,28 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);
- if (!ret)
+ if (ret)
+ goto unlock;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = freq_qos_add_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MIN, notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_add_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MAX, notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+unlock:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
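With the new @type argument, a consumer registers a notifier per constraint class; a hedged usage sketch (the foo_* names are hypothetical):

static int foo_min_freq_cb(struct notifier_block *nb, unsigned long freq,
			   void *data)
{
	/* React to the new aggregate MIN-frequency constraint. */
	return NOTIFY_OK;
}

static struct notifier_block foo_nb = { .notifier_call = foo_min_freq_cb };

/* During setup: */
ret = dev_pm_qos_add_notifier(dev, &foo_nb, DEV_PM_QOS_MIN_FREQUENCY);
/* During teardown: */
dev_pm_qos_remove_notifier(dev, &foo_nb, DEV_PM_QOS_MIN_FREQUENCY);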
@@ -504,24 +578,44 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
*
* @dev: target device for the constraint
* @notifier: notifier block to be removed.
+ * @type: request type.
*
* Will remove the notifier from the notification chain that gets called
* upon changes to the target value.
*/
int dev_pm_qos_remove_notifier(struct device *dev,
- struct notifier_block *notifier)
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{
- int retval = 0;
+ int ret = 0;
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
- if (!IS_ERR_OR_NULL(dev->power.qos))
- retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
- notifier);
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ goto unlock;
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+ notifier);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MIN, notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MAX, notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+unlock:
mutex_unlock(&dev_pm_qos_mtx);
- return retval;
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
@@ -581,6 +675,9 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL;
break;
+ default:
+ WARN_ON(1);
+ return;
}
__dev_pm_qos_remove_request(req);
kfree(req);
diff --git a/drivers/base/power/runtime-test.c b/drivers/base/power/runtime-test.c
new file mode 100644
index 000000000000..1535ad2b0264
--- /dev/null
+++ b/drivers/base/power/runtime-test.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Google, Inc.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/pm_runtime.h>
+#include <kunit/device.h>
+#include <kunit/test.h>
+
+#define DEVICE_NAME "pm_runtime_test_device"
+
+static void pm_runtime_depth_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_get_sync(dev)); /* "already active" */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/* Test pm_runtime_put() and friends when already suspended. */
+static void pm_runtime_already_suspended_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ /* Grab 2 refcounts */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_get_noresume(dev);
+ /* The first put() sees usage_count 1 */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync_autosuspend(dev));
+ /* The second put() sees usage_count 0 but tells us "already suspended". */
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ /* Should have remained suspended the whole time. */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static void pm_runtime_idle_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ pm_runtime_put_noidle(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+}
+
+static void pm_runtime_disabled_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /* Never called pm_runtime_enable() */
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+
+ /* "disabled" is treated as "active" */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_suspended(dev));
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_autosuspend(dev));
+
+ /* Still disabled */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+}
+
+static void pm_runtime_error_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ /* Fake a .runtime_resume() error */
+ dev->power.runtime_error = -EIO;
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_autosuspend(dev));
+
+ /* Error is still pending */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EIO, dev->power.runtime_error);
+ /* Clear error */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, dev->power.runtime_error);
+ /* Still suspended */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get(dev));
+ pm_runtime_barrier(dev);
+ pm_runtime_put(dev);
+ pm_runtime_suspend(dev); /* flush the put(), to suspend */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_resume_and_get(dev));
+
+ /*
+ * The following should all return -EAGAIN (usage is non-zero) or 1
+ * (already resumed).
+ */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ /* Suspended again */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/*
+ * Explore a typical probe() sequence in which a device marks itself powered,
+ * but doesn't hold any runtime PM reference, so it suspends as soon as it goes
+ * idle.
+ */
+static void pm_runtime_probe_active_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_status_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_active(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Nothing to flush. We stay active. */
+ pm_runtime_barrier(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Ask for idle? Now we suspend. */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static struct kunit_case pm_runtime_test_cases[] = {
+ KUNIT_CASE(pm_runtime_depth_test),
+ KUNIT_CASE(pm_runtime_already_suspended_test),
+ KUNIT_CASE(pm_runtime_idle_test),
+ KUNIT_CASE(pm_runtime_disabled_test),
+ KUNIT_CASE(pm_runtime_error_test),
+ KUNIT_CASE(pm_runtime_probe_active_test),
+ {}
+};
+
+static struct kunit_suite pm_runtime_test_suite = {
+ .name = "pm_runtime_test_cases",
+ .test_cases = pm_runtime_test_cases,
+};
+
+kunit_test_suite(pm_runtime_test_suite);
+MODULE_DESCRIPTION("Runtime power management unit test suite");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 457be03b744d..84676cc24221 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,18 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/runtime.c - Helper functions for device runtime PM
*
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
* Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
+#include <linux/rculist.h>
#include <trace/events/rpm.h>
#include "../base.h"
@@ -20,10 +19,24 @@
typedef int (*pm_callback_t)(struct device *);
+static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
+{
+ return *(pm_callback_t *)(start + offset);
+}
+
+static pm_callback_t __rpm_get_driver_callback(struct device *dev,
+ size_t cb_offset)
+{
+ if (dev->driver && dev->driver->pm)
+ return get_callback_ptr(dev->driver->pm, cb_offset);
+
+ return NULL;
+}
+
static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
- pm_callback_t cb;
const struct dev_pm_ops *ops;
+ pm_callback_t cb = NULL;
if (dev->pm_domain)
ops = &dev->pm_domain->ops;
@@ -37,12 +50,10 @@ static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
ops = NULL;
if (ops)
- cb = *(pm_callback_t *)((void *)ops + cb_offset);
- else
- cb = NULL;
+ cb = get_callback_ptr(ops, cb_offset);
- if (!cb && dev->driver && dev->driver->pm)
- cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+ if (!cb)
+ cb = __rpm_get_driver_callback(dev, cb_offset);
return cb;
}
@@ -64,30 +75,67 @@ static int rpm_suspend(struct device *dev, int rpmflags);
* runtime_status field is updated, to account the time in the old state
* correctly.
*/
-void update_pm_runtime_accounting(struct device *dev)
+static void update_pm_runtime_accounting(struct device *dev)
{
- unsigned long now = jiffies;
- unsigned long delta;
+ u64 now, last, delta;
+
+ if (dev->power.disable_depth > 0)
+ return;
- delta = now - dev->power.accounting_timestamp;
+ last = dev->power.accounting_timestamp;
+ now = ktime_get_mono_fast_ns();
dev->power.accounting_timestamp = now;
- if (dev->power.disable_depth > 0)
+ /*
+ * Because ktime_get_mono_fast_ns() is not monotonic during
+ * timekeeping updates, ensure that 'now' is after the last saved
+ * timestamp.
+ */
+ if (now < last)
return;
+ delta = now - last;
+
if (dev->power.runtime_status == RPM_SUSPENDED)
- dev->power.suspended_jiffies += delta;
+ dev->power.suspended_time += delta;
else
- dev->power.active_jiffies += delta;
+ dev->power.active_time += delta;
}
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
update_pm_runtime_accounting(dev);
+ trace_rpm_status(dev, status);
dev->power.runtime_status = status;
}
+static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
+{
+ u64 time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ update_pm_runtime_accounting(dev);
+ time = suspended ? dev->power.suspended_time : dev->power.active_time;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return time;
+}
+
+u64 pm_runtime_active_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, false);
+}
+
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
/**
* pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
* @dev: Device to handle.
@@ -95,7 +143,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
static void pm_runtime_deactivate_timer(struct device *dev)
{
if (dev->power.timer_expires > 0) {
- hrtimer_cancel(&dev->power.suspend_timer);
+ hrtimer_try_to_cancel(&dev->power.suspend_timer);
dev->power.timer_expires = 0;
}
}
@@ -129,24 +177,21 @@ static void pm_runtime_cancel_pending(struct device *dev)
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
int autosuspend_delay;
- u64 last_busy, expires = 0;
- u64 now = ktime_to_ns(ktime_get());
+ u64 expires;
if (!dev->power.use_autosuspend)
- goto out;
+ return 0;
autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
- goto out;
-
- last_busy = READ_ONCE(dev->power.last_busy);
+ return 0;
- expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
- if (expires <= now)
- expires = 0; /* Already expired. */
+ expires = READ_ONCE(dev->power.last_busy);
+ expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+ if (expires > ktime_get_mono_fast_ns())
+ return expires; /* Expires in the future */
- out:
- return expires;
+ return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
@@ -172,7 +217,7 @@ static int dev_memalloc_noio(struct device *dev, void *data)
 * resume/suspend callback of any one of its ancestors (or the
 * block device itself), the deadlock may be triggered inside the
 * memory allocation since it might not complete until the block
- * device becomes active and the involed page I/O finishes. The
+ * device becomes active and the involved page I/O finishes. The
 * situation was first pointed out by Alan Stern. Network devices
 * are involved in iSCSI kinds of situations.
*
@@ -212,8 +257,7 @@ void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
* flag was set by any one of the descendants.
*/
if (!dev || (!enable &&
- device_for_each_child(dev, NULL,
- dev_memalloc_noio)))
+ device_for_each_child(dev, NULL, dev_memalloc_noio)))
break;
}
mutex_unlock(&dev_hotplug_mutex);
@@ -232,19 +276,17 @@ static int rpm_check_suspend_allowed(struct device *dev)
retval = -EINVAL;
else if (dev->power.disable_depth > 0)
retval = -EACCES;
- else if (atomic_read(&dev->power.usage_count) > 0)
+ else if (atomic_read(&dev->power.usage_count))
retval = -EAGAIN;
- else if (!dev->power.ignore_children &&
- atomic_read(&dev->power.child_count))
+ else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
retval = -EBUSY;
/* Pending resume requests take precedence over suspends. */
- else if ((dev->power.deferred_resume
- && dev->power.runtime_status == RPM_SUSPENDING)
- || (dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME))
+ else if ((dev->power.deferred_resume &&
+ dev->power.runtime_status == RPM_SUSPENDING) ||
+ (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
- else if (__dev_pm_qos_read_value(dev) == 0)
+ else if (__dev_pm_qos_resume_latency(dev) == 0)
retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
@@ -256,14 +298,11 @@ static int rpm_get_suppliers(struct device *dev)
{
struct device_link *link;
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held()) {
int retval;
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
- continue;
-
- if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
- link->rpm_active)
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
@@ -272,21 +311,59 @@ static int rpm_get_suppliers(struct device *dev)
pm_runtime_put_noidle(link->supplier);
return retval;
}
- link->rpm_active = true;
+ refcount_inc(&link->rpm_active);
}
return 0;
}
+/**
+ * pm_runtime_release_supplier - Drop references to device link's supplier.
+ * @link: Target device link.
+ *
+ * Drop all runtime PM references associated with @link to its supplier device.
+ */
+void pm_runtime_release_supplier(struct device_link *link)
+{
+ struct device *supplier = link->supplier;
+
+ /*
+ * The additional power.usage_count check is a safety net in case
+ * the rpm_active refcount becomes saturated, in which case
+ * refcount_dec_not_one() would return true forever, but it is not
+ * strictly necessary.
+ */
+ while (refcount_dec_not_one(&link->rpm_active) &&
+ atomic_read(&supplier->power.usage_count) > 0)
+ pm_runtime_put_noidle(supplier);
+}
+
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held()) {
+ pm_runtime_release_supplier(link);
+ if (try_to_suspend)
+ pm_request_idle(link->supplier);
+ }
+}
+
static void rpm_put_suppliers(struct device *dev)
{
+ __rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
struct device_link *link;
+ int idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (link->rpm_active &&
- READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
- pm_runtime_put(link->supplier);
- link->rpm_active = false;
- }
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ pm_request_idle(link->supplier);
+
+ device_links_read_unlock(idx);
}
/**
@@ -297,7 +374,7 @@ static void rpm_put_suppliers(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
- int retval, idx;
+ int retval = 0, idx;
bool use_links = dev->power.links_count > 0;
if (dev->power.irq_safe) {
@@ -316,14 +393,17 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
idx = device_links_read_lock();
retval = rpm_get_suppliers(dev);
- if (retval)
+ if (retval) {
+ rpm_put_suppliers(dev);
goto fail;
+ }
device_links_read_unlock(idx);
}
}
- retval = cb(dev);
+ if (cb)
+ retval = cb(dev);
if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
@@ -335,14 +415,14 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
*
* Do that if resume fails too.
*/
- if (use_links
- && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
- || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ if (use_links &&
+ ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
+ (dev->power.runtime_status == RPM_RESUMING && retval))) {
idx = device_links_read_lock();
- fail:
- rpm_put_suppliers(dev);
+ __rpm_put_suppliers(dev, false);
+fail:
device_links_read_unlock(idx);
}
@@ -353,6 +433,49 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
}
/**
+ * rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+{
+ int retval;
+
+ if (dev->power.memalloc_noio) {
+ unsigned int noio_flag;
+
+ /*
+ * Deadlock might be caused if memory allocation with
+ * GFP_KERNEL happens inside runtime_suspend and
+ * runtime_resume callbacks of one block device's
+ * ancestor or the block device itself. A network
+ * device might be thought of as part of an iSCSI block
+ * device, so the network device and its ancestors should
+ * be marked as memalloc_noio too.
+ */
+ noio_flag = memalloc_noio_save();
+ retval = __rpm_callback(cb, dev);
+ memalloc_noio_restore(noio_flag);
+ } else {
+ retval = __rpm_callback(cb, dev);
+ }
+
+ /*
+ * Since -EACCES means that runtime PM is disabled for the given device,
+ * it should not be returned by runtime PM callbacks. If it is returned
+ * nevertheless, assume it to be a transient error and convert it to
+ * -EAGAIN.
+ */
+ if (retval == -EACCES)
+ retval = -EAGAIN;
+
+ if (retval != -EAGAIN && retval != -EBUSY)
+ dev->power.runtime_error = retval;
+
+ return retval;
+}
+
+/**
* rpm_idle - Notify device bus type if the device can be suspended.
* @dev: Device to notify the bus type about.
* @rpmflags: Flag bits.
@@ -370,11 +493,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
- trace_rpm_idle_rcuidle(dev, rpmflags);
+ trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
+ else if ((rpmflags & RPM_GET_PUT) && retval == 1)
+ ; /* put() is allowed in RPM_SUSPENDED */
+
/* Idle notifications are allowed only in the RPM_ACTIVE state. */
else if (dev->power.runtime_status != RPM_ACTIVE)
retval = -EAGAIN;
@@ -390,13 +516,17 @@ static int rpm_idle(struct device *dev, int rpmflags)
/* Act as though RPM_NOWAIT is always set. */
else if (dev->power.idle_notification)
retval = -EINPROGRESS;
+
if (retval)
goto out;
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
- if (dev->power.no_callbacks)
+ callback = RPM_GET_CALLBACK(dev, runtime_idle);
+
+ /* If no callback assume success. */
+ if (!callback || dev->power.no_callbacks)
goto out;
/* Carry out an asynchronous or a synchronous idle notification. */
@@ -406,61 +536,33 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
+ trace_rpm_return_int(dev, _THIS_IP_, 0);
return 0;
}
dev->power.idle_notification = true;
- callback = RPM_GET_CALLBACK(dev, runtime_idle);
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = callback(dev);
- if (callback)
- retval = __rpm_callback(callback, dev);
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
out:
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
/**
- * rpm_callback - Run a given runtime PM callback for a given device.
- * @cb: Runtime PM callback to run.
- * @dev: Device to run the callback for.
- */
-static int rpm_callback(int (*cb)(struct device *), struct device *dev)
-{
- int retval;
-
- if (!cb)
- return -ENOSYS;
-
- if (dev->power.memalloc_noio) {
- unsigned int noio_flag;
-
- /*
- * Deadlock might be caused if memory allocation with
- * GFP_KERNEL happens inside runtime_suspend and
- * runtime_resume callbacks of one block device's
- * ancestor or the block device itself. Network
- * device might be thought as part of iSCSI block
- * device, so network device and its ancestor should
- * be marked as memalloc_noio too.
- */
- noio_flag = memalloc_noio_save();
- retval = __rpm_callback(cb, dev);
- memalloc_noio_restore(noio_flag);
- } else {
- retval = __rpm_callback(cb, dev);
- }
-
- dev->power.runtime_error = retval;
- return retval != -EACCES ? retval : -EIO;
-}
-
-/**
* rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
@@ -488,24 +590,22 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- trace_rpm_suspend_rcuidle(dev, rpmflags);
+ trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
-
if (retval < 0)
- ; /* Conditions are wrong. */
+ goto out; /* Conditions are wrong. */
/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
- else if (dev->power.runtime_status == RPM_RESUMING &&
- !(rpmflags & RPM_ASYNC))
+ if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
retval = -EAGAIN;
+
if (retval)
goto out;
/* If the autosuspend_delay time hasn't expired yet, reschedule. */
- if ((rpmflags & RPM_AUTO)
- && dev->power.runtime_status != RPM_SUSPENDING) {
+ if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
u64 expires = pm_runtime_autosuspend_expiration(dev);
if (expires != 0) {
@@ -520,7 +620,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
* rest.
*/
if (!(dev->power.timer_expires &&
- dev->power.timer_expires <= expires)) {
+ dev->power.timer_expires <= expires)) {
/*
* We add a slack of 25% to gather wakeups
* without sacrificing the granularity.
@@ -530,9 +630,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
dev->power.timer_expires = expires;
hrtimer_start_range_ns(&dev->power.suspend_timer,
- ns_to_ktime(expires),
- slack,
- HRTIMER_MODE_ABS);
+ ns_to_ktime(expires),
+ slack,
+ HRTIMER_MODE_ABS);
}
dev->power.timer_autosuspends = 1;
goto out;
@@ -599,6 +699,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
if (retval)
goto fail;
+ dev_pm_enable_wake_irq_complete(dev);
+
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -616,8 +718,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ if (dev->power.irq_safe)
+ goto out;
+
/* Maybe the parent is now able to suspend. */
- if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+ if (parent && !parent->power.ignore_children) {
spin_unlock(&dev->power.lock);
spin_lock(&parent->power.lock);
@@ -626,33 +731,38 @@ static int rpm_suspend(struct device *dev, int rpmflags)
spin_lock(&dev->power.lock);
}
+ /* Maybe the suppliers are now able to suspend. */
+ if (dev->power.links_count > 0) {
+ spin_unlock_irq(&dev->power.lock);
+
+ rpm_suspend_suppliers(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ }
out:
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
fail:
- dev_pm_disable_wake_irq_check(dev);
+ dev_pm_disable_wake_irq_check(dev, true);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ /*
+ * On transient errors, if the callback routine failed an autosuspend,
+ * and if the last_busy time has been updated so that there is a new
+ * autosuspend expiration time, automatically reschedule another
+ * autosuspend.
+ */
+ if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+
+ pm_runtime_cancel_pending(dev);
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
goto out;
}
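The autosuspend rescheduling above pairs with the usual driver-side autosuspend pattern; a minimal sketch (the 2000 ms delay is an arbitrary example):

/* Setup, e.g. at probe time: */
pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);

/* Per I/O operation: */
pm_runtime_get_sync(dev);
/* ... access the hardware ... */
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);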
@@ -680,16 +790,20 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
- trace_rpm_resume_rcuidle(dev, rpmflags);
+ trace_rpm_resume(dev, rpmflags);
repeat:
- if (dev->power.runtime_error)
+ if (dev->power.runtime_error) {
retval = -EINVAL;
- else if (dev->power.disable_depth == 1 && dev->power.is_suspended
- && dev->power.runtime_status == RPM_ACTIVE)
- retval = 1;
- else if (dev->power.disable_depth > 0)
- retval = -EACCES;
+ } else if (dev->power.disable_depth > 0) {
+ if (dev->power.runtime_status == RPM_ACTIVE &&
+ dev->power.last_status == RPM_ACTIVE)
+ retval = 1;
+ else if (rpmflags & RPM_TRANSPARENT)
+ goto out;
+ else
+ retval = -EACCES;
+ }
if (retval)
goto out;
@@ -708,15 +822,18 @@ static int rpm_resume(struct device *dev, int rpmflags)
goto out;
}
- if (dev->power.runtime_status == RPM_RESUMING
- || dev->power.runtime_status == RPM_SUSPENDING) {
+ if (dev->power.runtime_status == RPM_RESUMING ||
+ dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait);
if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
- if (dev->power.runtime_status == RPM_SUSPENDING)
+ if (dev->power.runtime_status == RPM_SUSPENDING) {
dev->power.deferred_resume = true;
- else
+ if (rpmflags & RPM_NOWAIT)
+ retval = -EINPROGRESS;
+ } else {
retval = -EINPROGRESS;
+ }
goto out;
}
@@ -733,8 +850,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
- if (dev->power.runtime_status != RPM_RESUMING
- && dev->power.runtime_status != RPM_SUSPENDING)
+ if (dev->power.runtime_status != RPM_RESUMING &&
+ dev->power.runtime_status != RPM_SUSPENDING)
break;
spin_unlock_irq(&dev->power.lock);
@@ -754,9 +871,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
*/
if (dev->power.no_callbacks && !parent && dev->parent) {
spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
- if (dev->parent->power.disable_depth > 0
- || dev->parent->power.ignore_children
- || dev->parent->power.runtime_status == RPM_ACTIVE) {
+ if (dev->parent->power.disable_depth > 0 ||
+ dev->parent->power.ignore_children ||
+ dev->parent->power.runtime_status == RPM_ACTIVE) {
atomic_inc(&dev->parent->power.child_count);
spin_unlock(&dev->parent->power.lock);
retval = 1;
@@ -785,6 +902,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
parent = dev->parent;
if (dev->power.irq_safe)
goto skip_parent;
+
spin_unlock(&dev->power.lock);
pm_runtime_get_noresume(parent);
@@ -794,8 +912,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
* Resume the parent if it has runtime PM enabled and not been
* set to ignore its children.
*/
- if (!parent->power.disable_depth
- && !parent->power.ignore_children) {
+ if (!parent->power.disable_depth &&
+ !parent->power.ignore_children) {
rpm_resume(parent, 0);
if (parent->power.runtime_status != RPM_ACTIVE)
retval = -EBUSY;
@@ -805,6 +923,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock(&dev->power.lock);
if (retval)
goto out;
+
goto repeat;
}
skip_parent:
@@ -816,7 +935,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
- dev_pm_disable_wake_irq_check(dev);
+ dev_pm_disable_wake_irq_check(dev, false);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
@@ -843,7 +962,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -892,7 +1011,7 @@ static void pm_runtime_work(struct work_struct *work)
/**
* pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
- * @data: Device pointer passed by pm_schedule_suspend().
+ * @timer: hrtimer used by pm_schedule_suspend().
*
* Check if the time is right and queue a suspend request.
*/
@@ -909,7 +1028,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
* If 'expires' is after the current time, we've been called
* too early.
*/
- if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
+ if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -928,7 +1047,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
unsigned long flags;
- ktime_t expires;
+ u64 expires;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
@@ -945,8 +1064,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
- expires = ktime_add(ktime_get(), ms_to_ktime(delay));
- dev->power.timer_expires = ktime_to_ns(expires);
+ expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+ dev->power.timer_expires = expires;
dev->power.timer_autosuspends = 0;
hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
@@ -957,13 +1076,33 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
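For reference, a minimal usage sketch of pm_schedule_suspend() — hypothetical driver code with invented my_dev_* names, not part of the diff:

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	/* Request an asynchronous runtime suspend roughly 100 ms from now. */
	static void my_dev_done_with_io(struct device *dev)
	{
		int ret = pm_schedule_suspend(dev, 100);

		if (ret)
			dev_dbg(dev, "suspend not scheduled: %d\n", ret);
	}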
+static int rpm_drop_usage_count(struct device *dev)
+{
+ int ret;
+
+ ret = atomic_sub_return(1, &dev->power.usage_count);
+ if (ret >= 0)
+ return ret;
+
+ /*
+ * Because rpm_resume() does not check the usage counter, it will resume
+ * the device even if the usage counter is 0 or negative, so it is
+ * sufficient to increment the usage counter here to reverse the change
+ * made above.
+ */
+ atomic_inc(&dev->power.usage_count);
+ dev_warn(dev, "Runtime PM usage count underflow!\n");
+ return -EINVAL;
+}
+
/**
* __pm_runtime_idle - Entry point for runtime idle operations.
* @dev: Device to send idle notification for.
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, decrement the device's usage count and
- * return immediately if it is larger than zero. Then carry out an idle
+ * return immediately if it is larger than zero (if it becomes negative, log a
+ * warning, increment it, and return an error). Then carry out an idle
* notification, either synchronous or asynchronous.
*
* This routine may be called in atomic context if the RPM_ASYNC flag is set,
@@ -975,8 +1114,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ retval = rpm_drop_usage_count(dev);
+ if (retval < 0) {
+ return retval;
+ } else if (retval > 0) {
+ trace_rpm_usage(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -995,7 +1139,8 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, decrement the device's usage count and
- * return immediately if it is larger than zero. Then carry out a suspend,
+ * return immediately if it is larger than zero (if it becomes negative, log a
+ * warning, increment it, and return an error). Then carry out a suspend,
* either synchronous or asynchronous.
*
* This routine may be called in atomic context if the RPM_ASYNC flag is set,
@@ -1007,8 +1152,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
int retval;
if (rpmflags & RPM_GET_PUT) {
- if (!atomic_dec_and_test(&dev->power.usage_count))
+ retval = rpm_drop_usage_count(dev);
+ if (retval < 0) {
+ return retval;
+ } else if (retval > 0) {
+ trace_rpm_usage(dev, rpmflags);
return 0;
+ }
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
@@ -1052,27 +1202,88 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * pm_runtime_get_conditional - Conditionally bump up device usage counter.
* @dev: Device to handle.
+ * @ign_usage_count: Whether or not to look at the current usage counter value.
*
- * Return -EINVAL if runtime PM is disabled for the device.
+ * Return -EINVAL if runtime PM is disabled for @dev.
*
- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
- * and the runtime PM usage counter is nonzero, increment the counter and
- * return 1. Otherwise return 0 without changing the counter.
+ * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
+ * is set, or (2) @dev is not ignoring children and its active child count is
+ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
+ * the usage counter of @dev and return 1.
+ *
+ * Otherwise, return 0 without changing the usage counter.
+ *
+ * If @ign_usage_count is %true, this function can be used to prevent suspending
+ * the device when its runtime PM status is %RPM_ACTIVE.
+ *
+ * If @ign_usage_count is %false, this function can be used to prevent
+ * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
+ * runtime PM usage counter is not zero.
+ *
+ * The caller is responsible for decrementing the runtime PM usage counter of
+ * @dev after this function has returned a positive value for it.
*/
-int pm_runtime_get_if_in_use(struct device *dev)
+static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
- retval = dev->power.disable_depth > 0 ? -EINVAL :
- dev->power.runtime_status == RPM_ACTIVE
- && atomic_inc_not_zero(&dev->power.usage_count);
+ if (dev->power.disable_depth > 0) {
+ retval = -EINVAL;
+ } else if (dev->power.runtime_status != RPM_ACTIVE) {
+ retval = 0;
+ } else if (ign_usage_count || (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0)) {
+ retval = 1;
+ atomic_inc(&dev->power.usage_count);
+ } else {
+ retval = atomic_inc_not_zero(&dev->power.usage_count);
+ }
+ trace_rpm_usage(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
+
return retval;
}
+
+/**
+ * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
+ * in active state
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE, in which case 1 is returned. If the device is in a different
+ * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
+ * device, in which case its usage counter is left unmodified as well.
+ */
+int pm_runtime_get_if_active(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
+
+/**
+ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE and either its runtime PM usage counter is greater than 0, or
+ * it is not ignoring children and its active child count is nonzero. 1 is
+ * returned in this case.
+ *
+ * If @dev is in a different state or it is not in use (that is, its usage
+ * counter is 0, or it is ignoring children, or its active child count is 0),
+ * 0 is returned.
+ *
+ * -EINVAL is returned if runtime PM is disabled for the device, in which case
+ * the usage counter of @dev is left unmodified as well.
+ */
+int pm_runtime_get_if_in_use(struct device *dev)
+{
+ return pm_runtime_get_conditional(dev, false);
+}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
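A sketch of how a caller might use the new wrapper — hypothetical code, assuming <linux/pm_runtime.h>:

	#include <linux/pm_runtime.h>

	/*
	 * Take a usage-count reference only if the device is already
	 * RPM_ACTIVE, so this path never waits for a resume.
	 */
	static void my_dev_poll_hw(struct device *dev)
	{
		if (pm_runtime_get_if_active(dev) <= 0)
			return;	/* 0: not active; -EINVAL: runtime PM disabled */

		/* ... hardware can be accessed safely here ... */

		pm_runtime_put(dev);
	}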
/**
@@ -1091,12 +1302,19 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
* and the device parent's counter of unsuspended children is modified to
* reflect the new status. If the new status is RPM_SUSPENDED, an idle
* notification request for the parent is submitted.
+ *
+ * If @dev has any suppliers (as reflected by device links to them), and @status
+ * is RPM_ACTIVE, they will be activated upfront and if the activation of one
+ * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
+ * of the @status value) and the suppliers will be deactivated on exit. The
+ * error returned by the failing supplier activation will be returned in that
+ * case.
*/
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
struct device *parent = dev->parent;
- unsigned long flags;
bool notify_parent = false;
+ unsigned long flags;
int error = 0;
if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
@@ -1104,11 +1322,38 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
spin_lock_irqsave(&dev->power.lock, flags);
- if (!dev->power.runtime_error && !dev->power.disable_depth) {
+ /*
+ * Prevent PM-runtime from being enabled for the device or return an
+ * error if it is enabled already and working.
+ */
+ if (dev->power.runtime_error || dev->power.disable_depth)
+ dev->power.disable_depth++;
+ else
error = -EAGAIN;
- goto out;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (error)
+ return error;
+
+ /*
+ * If the new status is RPM_ACTIVE, the suppliers can be activated
+ * upfront regardless of the current status, because next time
+ * rpm_put_suppliers() runs, the rpm_active refcounts of the links
+ * involved will be dropped down to one anyway.
+ */
+ if (status == RPM_ACTIVE) {
+ int idx = device_links_read_lock();
+
+ error = rpm_get_suppliers(dev);
+ if (error)
+ status = RPM_SUSPENDED;
+
+ device_links_read_unlock(idx);
}
+ spin_lock_irqsave(&dev->power.lock, flags);
+
if (dev->power.runtime_status == status || !parent)
goto out_set;
@@ -1123,9 +1368,9 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
* not active, has runtime PM enabled and the
* 'power.ignore_children' flag unset.
*/
- if (!parent->power.disable_depth
- && !parent->power.ignore_children
- && parent->power.runtime_status != RPM_ACTIVE) {
+ if (!parent->power.disable_depth &&
+ !parent->power.ignore_children &&
+ parent->power.runtime_status != RPM_ACTIVE) {
dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
dev_name(dev),
dev_name(parent));
@@ -1136,19 +1381,33 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
spin_unlock(&parent->power.lock);
- if (error)
+ if (error) {
+ status = RPM_SUSPENDED;
goto out;
+ }
}
out_set:
__update_runtime_status(dev, status);
- dev->power.runtime_error = 0;
+ if (!error)
+ dev->power.runtime_error = 0;
+
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
if (notify_parent)
pm_request_idle(parent);
+ if (status == RPM_SUSPENDED) {
+ int idx = device_links_read_lock();
+
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
+ }
+
+ pm_runtime_enable(dev);
+
return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
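Most drivers reach __pm_runtime_set_status() through the pm_runtime_set_active() and pm_runtime_set_suspended() wrappers; a hypothetical probe fragment (invented name):

	#include <linux/pm_runtime.h>

	static int my_dev_pm_setup(struct device *dev)
	{
		/* Declare the hardware as already powered ... */
		int ret = pm_runtime_set_active(dev);

		if (ret)
			return ret;

		/* ... so the first pm_runtime_put() can suspend it. */
		pm_runtime_enable(dev);
		return 0;
	}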
@@ -1176,9 +1435,9 @@ static void __pm_runtime_barrier(struct device *dev)
dev->power.request_pending = false;
}
- if (dev->power.runtime_status == RPM_SUSPENDING
- || dev->power.runtime_status == RPM_RESUMING
- || dev->power.idle_notification) {
+ if (dev->power.runtime_status == RPM_SUSPENDING ||
+ dev->power.runtime_status == RPM_RESUMING ||
+ dev->power.idle_notification) {
DEFINE_WAIT(wait);
/* Suspend, wake-up or idle notification in progress. */
@@ -1208,47 +1467,48 @@ static void __pm_runtime_barrier(struct device *dev)
* Next, make sure that all pending requests for the device have been flushed
* from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
- *
- * Return value:
- * 1, if there was a resume request pending and the device had to be woken up,
- * 0, otherwise
*/
-int pm_runtime_barrier(struct device *dev)
+void pm_runtime_barrier(struct device *dev)
{
- int retval = 0;
-
pm_runtime_get_noresume(dev);
spin_lock_irq(&dev->power.lock);
if (dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME) {
+ && dev->power.request == RPM_REQ_RESUME)
rpm_resume(dev, 0);
- retval = 1;
- }
__pm_runtime_barrier(dev);
spin_unlock_irq(&dev->power.lock);
pm_runtime_put_noidle(dev);
-
- return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
-/**
- * __pm_runtime_disable - Disable runtime PM of a device.
- * @dev: Device to handle.
- * @check_resume: If set, check if there's a resume request for the device.
- *
- * Increment power.disable_depth for the device and if it was zero previously,
- * cancel all pending runtime PM requests for the device and wait for all
- * operations in progress to complete. The device can be either active or
- * suspended after its runtime PM has been disabled.
- *
- * If @check_resume is set and there's a resume request pending when
- * __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its runtime PM.
- */
+bool pm_runtime_block_if_disabled(struct device *dev)
+{
+ bool ret;
+
+ spin_lock_irq(&dev->power.lock);
+
+ ret = !pm_runtime_enabled(dev);
+ if (ret && dev->power.last_status == RPM_INVALID)
+ dev->power.last_status = RPM_BLOCKED;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ return ret;
+}
+
+void pm_runtime_unblock(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.last_status == RPM_BLOCKED)
+ dev->power.last_status = RPM_INVALID;
+
+ spin_unlock_irq(&dev->power.lock);
+}
+
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
spin_lock_irq(&dev->power.lock);
@@ -1263,8 +1523,8 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
* means there probably is some I/O to process and disabling runtime PM
* shouldn't prevent the device from processing the I/O.
*/
- if (check_resume && dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME) {
+ if (check_resume && dev->power.request_pending &&
+ dev->power.request == RPM_REQ_RESUME) {
/*
* Prevent suspends and idle notifications from being carried
* out after we have woken up the device.
@@ -1276,8 +1536,13 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
pm_runtime_put_noidle(dev);
}
- if (!dev->power.disable_depth++)
+ /* Update time accounting before disabling PM-runtime. */
+ update_pm_runtime_accounting(dev);
+
+ if (!dev->power.disable_depth++) {
__pm_runtime_barrier(dev);
+ dev->power.last_status = dev->power.runtime_status;
+ }
out:
spin_unlock_irq(&dev->power.lock);
@@ -1294,29 +1559,107 @@ void pm_runtime_enable(struct device *dev)
spin_lock_irqsave(&dev->power.lock, flags);
- if (dev->power.disable_depth > 0)
- dev->power.disable_depth--;
- else
+ if (!dev->power.disable_depth) {
dev_warn(dev, "Unbalanced %s!\n", __func__);
+ goto out;
+ }
- WARN(!dev->power.disable_depth &&
- dev->power.runtime_status == RPM_SUSPENDED &&
- !dev->power.ignore_children &&
- atomic_read(&dev->power.child_count) > 0,
- "Enabling runtime PM for inactive device (%s) with active children\n",
- dev_name(dev));
+ if (--dev->power.disable_depth > 0)
+ goto out;
+ if (dev->power.last_status == RPM_BLOCKED) {
+ dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
+ dump_stack();
+ }
+ dev->power.last_status = RPM_INVALID;
+ dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+
+ if (dev->power.runtime_status == RPM_SUSPENDED &&
+ !dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0)
+ dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
+
+out:
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
+static void pm_runtime_set_suspended_action(void *data)
+{
+ pm_runtime_set_suspended(data);
+}
+
+/**
+ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_set_active_enabled(struct device *dev)
+{
+ int err;
+
+ err = pm_runtime_set_active(dev);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
+ if (err)
+ return err;
+
+ return devm_pm_runtime_enable(dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
+
+static void pm_runtime_disable_action(void *data)
+{
+ pm_runtime_dont_use_autosuspend(data);
+ pm_runtime_disable(data);
+}
+
+/**
+ * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ *
+ * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
+ * you at driver exit time if needed.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_enable(struct device *dev)
+{
+ pm_runtime_enable(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
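A hypothetical probe fragment showing the devres-managed pattern (invented name; assumes autosuspend is wanted):

	#include <linux/pm_runtime.h>

	static int my_dev_probe_pm(struct device *dev)
	{
		pm_runtime_set_autosuspend_delay(dev, 50);
		pm_runtime_use_autosuspend(dev);

		/*
		 * pm_runtime_disable() and pm_runtime_dont_use_autosuspend()
		 * run automatically via devres on driver detach.
		 */
		return devm_pm_runtime_enable(dev);
	}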
+
+static void pm_runtime_put_noidle_action(void *data)
+{
+ pm_runtime_put_noidle(data);
+}
+
+/**
+ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_get_noresume(struct device *dev)
+{
+ pm_runtime_get_noresume(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
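A hypothetical sketch of the noresume variant, pinning the usage counter for the whole bind lifetime (invented name):

	#include <linux/pm_runtime.h>

	static int my_always_on_probe(struct device *dev)
	{
		int ret = devm_pm_runtime_enable(dev);

		if (ret)
			return ret;

		/* Held until unbind; dropped by devres without an idle check. */
		return devm_pm_runtime_get_noresume(dev);
	}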
+
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
- * Increase the device's usage count and clear its power.runtime_auto flag,
- * so that it cannot be suspended at run time until pm_runtime_allow() is called
- * for it.
+ * Resume @dev if already suspended and block runtime suspend of @dev in such
+ * a way that it can be unblocked via the /sys/devices/.../power/control
+ * interface, or otherwise by calling pm_runtime_allow().
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
*/
void pm_runtime_forbid(struct device *dev)
{
@@ -1337,17 +1680,28 @@ EXPORT_SYMBOL_GPL(pm_runtime_forbid);
* pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
- * Decrease the device's usage count and set its power.runtime_auto flag.
+ * Unblock runtime suspend of @dev after it has been blocked by
+ * pm_runtime_forbid() (for instance, if it has been blocked via the
+ * /sys/devices/.../power/control interface), check if @dev can be
+ * suspended and suspend it in that case.
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
*/
void pm_runtime_allow(struct device *dev)
{
+ int ret;
+
spin_lock_irq(&dev->power.lock);
if (dev->power.runtime_auto)
goto out;
dev->power.runtime_auto = true;
- if (atomic_dec_and_test(&dev->power.usage_count))
+ ret = rpm_drop_usage_count(dev);
+ if (ret == 0)
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+ else if (ret > 0)
+ trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1387,6 +1741,7 @@ void pm_runtime_irq_safe(struct device *dev)
{
if (dev->parent)
pm_runtime_get_sync(dev->parent);
+
spin_lock_irq(&dev->power.lock);
dev->power.irq_safe = 1;
spin_unlock_irq(&dev->power.lock);
@@ -1415,6 +1770,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
if (!old_use || old_delay >= 0) {
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
+ } else {
+ trace_rpm_usage(dev, 0);
}
}
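The autosuspend bookkeeping above backs the usual driver-side pattern — a hypothetical I/O completion path (invented name):

	#include <linux/pm_runtime.h>

	static void my_dev_io_done(struct device *dev)
	{
		/* Refresh last_busy, then drop the reference; the device
		 * suspends only after the configured autosuspend delay. */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}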
@@ -1480,6 +1837,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
void pm_runtime_init(struct device *dev)
{
dev->power.runtime_status = RPM_SUSPENDED;
+ dev->power.last_status = RPM_INVALID;
dev->power.idle_notification = false;
dev->power.disable_depth = 1;
@@ -1494,12 +1852,12 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
- dev->power.accounting_timestamp = jiffies;
+ dev->power.needs_force_resume = false;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- dev->power.suspend_timer.function = pm_suspend_timer_fn;
+ hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1521,6 +1879,11 @@ void pm_runtime_reinit(struct device *dev)
pm_runtime_put(dev->parent);
}
}
+ /*
+ * Clear power.needs_force_resume in case it has been set by
+ * pm_runtime_force_suspend() invoked from a driver remove callback.
+ */
+ dev->power.needs_force_resume = false;
}
/**
@@ -1534,43 +1897,6 @@ void pm_runtime_remove(struct device *dev)
}
/**
- * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
- * @dev: Device whose driver is going to be removed.
- *
- * Check links from this device to any consumers and if any of them have active
- * runtime PM references to the device, drop the usage counter of the device
- * (once per link).
- *
- * Links with the DL_FLAG_STATELESS flag set are ignored.
- *
- * Since the device is guaranteed to be runtime-active at the point this is
- * called, nothing else needs to be done here.
- *
- * Moreover, this is called after device_links_busy() has returned 'false', so
- * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
- * therefore rpm_active can't be manipulated concurrently.
- */
-void pm_runtime_clean_up_links(struct device *dev)
-{
- struct device_link *link;
- int idx;
-
- idx = device_links_read_lock();
-
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
- if (link->flags & DL_FLAG_STATELESS)
- continue;
-
- if (link->rpm_active) {
- pm_runtime_put_noidle(dev);
- link->rpm_active = false;
- }
- }
-
- device_links_read_unlock(idx);
-}
-
-/**
* pm_runtime_get_suppliers - Resume and reference-count supplier devices.
* @dev: Consumer device.
*/
@@ -1581,9 +1907,11 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (link->flags & DL_FLAG_PM_RUNTIME)
+ dev_for_each_link_to_supplier(link, dev)
+ if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
+ link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
+ }
device_links_read_unlock(idx);
}
@@ -1599,9 +1927,12 @@ void pm_runtime_put_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
- if (link->flags & DL_FLAG_PM_RUNTIME)
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ if (link->supplier_preactivated) {
+ link->supplier_preactivated = false;
pm_runtime_put(link->supplier);
+ }
device_links_read_unlock(idx);
}
@@ -1613,23 +1944,49 @@ void pm_runtime_new_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
-void pm_runtime_drop_link(struct device *dev)
+static void pm_runtime_drop_link_count(struct device *dev)
{
- rpm_put_suppliers(dev);
-
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
dev->power.links_count--;
spin_unlock_irq(&dev->power.lock);
}
-static bool pm_runtime_need_not_resume(struct device *dev)
+/**
+ * pm_runtime_drop_link - Prepare for device link removal.
+ * @link: Device link going away.
+ *
+ * Drop the link count of the consumer end of @link and decrement the supplier
+ * device's runtime PM usage counter as many times as needed to drop all of the
+ * PM runtime references to it held by the consumer.
+ */
+void pm_runtime_drop_link(struct device_link *link)
{
- return atomic_read(&dev->power.usage_count) <= 1 &&
- (atomic_read(&dev->power.child_count) == 0 ||
- dev->power.ignore_children);
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
+ return;
+
+ pm_runtime_drop_link_count(link->consumer);
+ pm_runtime_release_supplier(link);
+ pm_request_idle(link->supplier);
+}
+
+static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
+{
+ /*
+ * Setting power.strict_midlayer means that the middle layer
+ * code does not want its runtime PM callbacks to be invoked via
+ * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
+ * return a direct pointer to the driver callback in that case.
+ */
+ if (dev_pm_strict_midlayer_is_set(dev))
+ return __rpm_get_driver_callback(dev, cb_offset);
+
+ return __rpm_get_callback(dev, cb_offset);
}
+#define GET_CALLBACK(dev, callback) \
+ get_callback(dev, offsetof(struct dev_pm_ops, callback))
+
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
@@ -1653,71 +2010,106 @@ int pm_runtime_force_suspend(struct device *dev)
int ret;
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
return 0;
- callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+ callback = GET_CALLBACK(dev, runtime_suspend);
+ dev_pm_enable_wake_irq_check(dev, true);
ret = callback ? callback(dev) : 0;
if (ret)
goto err;
+ dev_pm_enable_wake_irq_complete(dev);
+
/*
* If the device can stay in suspend after the system-wide transition
* to the working state that will follow, drop the children counter of
- * its parent, but set its status to RPM_SUSPENDED anyway in case this
- * function will be called again for it in the meantime.
+ * its parent and the usage counters of its suppliers. Otherwise, set
+ * power.needs_force_resume to let pm_runtime_force_resume() know that
+ * the device needs to be taken care of, and to prevent this function
+ * from handling the device again if it is passed to it subsequently.
*/
if (pm_runtime_need_not_resume(dev))
pm_runtime_set_suspended(dev);
else
- __update_runtime_status(dev, RPM_SUSPENDED);
+ dev->power.needs_force_resume = true;
return 0;
err:
+ dev_pm_disable_wake_irq_check(dev, true);
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
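One common way to consume these helpers is directly as system sleep callbacks — a hypothetical dev_pm_ops sketch (the my_dev_* callbacks are invented stubs):

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int my_dev_runtime_suspend(struct device *dev)
	{
		return 0;	/* hypothetical: quiesce the hardware */
	}

	static int my_dev_runtime_resume(struct device *dev)
	{
		return 0;	/* hypothetical: reinitialize the hardware */
	}

	static const struct dev_pm_ops my_dev_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					pm_runtime_force_resume)
		SET_RUNTIME_PM_OPS(my_dev_runtime_suspend,
				   my_dev_runtime_resume, NULL)
	};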
+#ifdef CONFIG_PM_SLEEP
+
/**
* pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
- * Prior invoking this function we expect the user to have brought the device
- * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and bring the device into full power, if it is expected to be
- * used on system resume. In the other case, we defer the resume to be managed
- * via runtime PM.
+ * This function expects that either pm_runtime_force_suspend() has put the
+ * device into a low-power state prior to calling it, or the device had been
+ * runtime-suspended before the preceding system-wide suspend transition and it
+ * was left in suspend during that transition.
*
- * Typically this function may be invoked from a system resume callback.
+ * The actions carried out by pm_runtime_force_suspend(), or by a runtime
+ * suspend in general, are reversed and the device is brought back into full
+ * power if it is expected to be used on system resume, which is the case when
+ * its needs_force_resume flag is set or when its smart_suspend flag is set and
+ * its runtime PM status is "active".
+ *
+ * In other cases, the resume is deferred to be managed via runtime PM.
+ *
+ * Typically, this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
int (*callback)(struct device *);
int ret = 0;
- if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
+ if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
+ pm_runtime_status_suspended(dev)))
goto out;
- /*
- * The value of the parent's children counter is correct already, so
- * just update the status of the device.
- */
- __update_runtime_status(dev, RPM_ACTIVE);
-
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
+ callback = GET_CALLBACK(dev, runtime_resume);
+ dev_pm_disable_wake_irq_check(dev, false);
ret = callback ? callback(dev) : 0;
if (ret) {
pm_runtime_set_suspended(dev);
+ dev_pm_enable_wake_irq_check(dev, false);
goto out;
}
pm_runtime_mark_last_busy(dev);
+
out:
+ /*
+ * The smart_suspend flag can be cleared here because it is not going
+ * to be necessary until the next system-wide suspend transition that
+ * will update it again.
+ */
+ dev->power.smart_suspend = false;
+ /*
+	 * Also clear needs_force_resume so that this function skips devices it
+	 * has already handled once.
+ */
+ dev->power.needs_force_resume = false;
+
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
+
+bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
+#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d713738ce796..13b31a3adc77 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -1,8 +1,7 @@
-/*
- * drivers/base/power/sysfs.c - sysfs entries for device PM
- */
-
+// SPDX-License-Identifier: GPL-2.0
+/* sysfs entries for device PM */
#include <linux/device.h>
+#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
@@ -101,8 +100,8 @@ static const char ctrl_on[] = "on";
static ssize_t control_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n",
- dev->power.runtime_auto ? ctrl_auto : ctrl_on);
+ return sysfs_emit(buf, "%s\n",
+ dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}
static ssize_t control_store(struct device * dev, struct device_attribute *attr,
@@ -122,70 +121,71 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(control);
static ssize_t runtime_active_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- int ret;
- spin_lock_irq(&dev->power.lock);
- update_pm_runtime_accounting(dev);
- ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
- spin_unlock_irq(&dev->power.lock);
- return ret;
+ u64 tmp = pm_runtime_active_time(dev);
+
+ do_div(tmp, NSEC_PER_MSEC);
+
+ return sysfs_emit(buf, "%llu\n", tmp);
}
static DEVICE_ATTR_RO(runtime_active_time);
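The conversion above relies on do_div() mutating its 64-bit dividend in place and returning the remainder — a minimal sketch of the same ns-to-ms step (illustrative only):

	#include <asm/div64.h>
	#include <linux/time64.h>

	static u64 example_ns_to_ms(u64 ns)
	{
		u64 ms = ns;	/* do_div() overwrites its first argument */

		do_div(ms, NSEC_PER_MSEC);	/* ms = ns / 1000000 */
		return ms;
	}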
static ssize_t runtime_suspended_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- int ret;
- spin_lock_irq(&dev->power.lock);
- update_pm_runtime_accounting(dev);
- ret = sprintf(buf, "%i\n",
- jiffies_to_msecs(dev->power.suspended_jiffies));
- spin_unlock_irq(&dev->power.lock);
- return ret;
+ u64 tmp = pm_runtime_suspended_time(dev);
+
+ do_div(tmp, NSEC_PER_MSEC);
+
+ return sysfs_emit(buf, "%llu\n", tmp);
}
static DEVICE_ATTR_RO(runtime_suspended_time);
static ssize_t runtime_status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- const char *p;
+ const char *output;
if (dev->power.runtime_error) {
- p = "error\n";
+ output = "error";
} else if (dev->power.disable_depth) {
- p = "unsupported\n";
+ output = "unsupported";
} else {
switch (dev->power.runtime_status) {
case RPM_SUSPENDED:
- p = "suspended\n";
+ output = "suspended";
break;
case RPM_SUSPENDING:
- p = "suspending\n";
+ output = "suspending";
break;
case RPM_RESUMING:
- p = "resuming\n";
+ output = "resuming";
break;
case RPM_ACTIVE:
- p = "active\n";
+ output = "active";
break;
default:
return -EIO;
}
}
- return sprintf(buf, p);
+ return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(runtime_status);
static ssize_t autosuspend_delay_ms_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
if (!dev->power.use_autosuspend)
return -EIO;
- return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
+
+ return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay);
}
static ssize_t autosuspend_delay_ms_store(struct device *dev,
@@ -214,11 +214,11 @@ static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
s32 value = dev_pm_qos_requested_resume_latency(dev);
if (value == 0)
- return sprintf(buf, "n/a\n");
+ return sysfs_emit(buf, "n/a\n");
if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
value = 0;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
@@ -258,11 +258,11 @@ static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
if (value < 0)
- return sprintf(buf, "auto\n");
+ return sysfs_emit(buf, "%s\n", "auto");
if (value == PM_QOS_LATENCY_ANY)
- return sprintf(buf, "any\n");
+ return sysfs_emit(buf, "%s\n", "any");
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
@@ -294,8 +294,8 @@ static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
- & PM_QOS_FLAG_NO_POWER_OFF));
+ return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+ & PM_QOS_FLAG_NO_POWER_OFF));
}
static ssize_t pm_qos_no_power_off_store(struct device *dev,
@@ -323,9 +323,9 @@ static const char _disabled[] = "disabled";
static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n", device_can_wakeup(dev)
- ? (device_may_wakeup(dev) ? _enabled : _disabled)
- : "");
+ return sysfs_emit(buf, "%s\n", device_can_wakeup(dev)
+ ? (device_may_wakeup(dev) ? _enabled : _disabled)
+ : "");
}
static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
@@ -348,7 +348,7 @@ static DEVICE_ATTR_RW(wakeup);
static ssize_t wakeup_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- unsigned long count = 0;
+ unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -357,7 +357,10 @@ static ssize_t wakeup_count_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_count);
@@ -366,7 +369,7 @@ static ssize_t wakeup_active_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- unsigned long count = 0;
+ unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -375,7 +378,10 @@ static ssize_t wakeup_active_count_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_active_count);
@@ -384,7 +390,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- unsigned long count = 0;
+ unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -393,7 +399,10 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_abort_count);
@@ -402,7 +411,7 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- unsigned long count = 0;
+ unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -411,7 +420,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_expire_count);
@@ -419,7 +431,7 @@ static DEVICE_ATTR_RO(wakeup_expire_count);
static ssize_t wakeup_active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- unsigned int active = 0;
+ unsigned int active;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -428,7 +440,10 @@ static ssize_t wakeup_active_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%u\n", active);
}
static DEVICE_ATTR_RO(wakeup_active);
@@ -437,7 +452,7 @@ static ssize_t wakeup_total_time_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- s64 msec = 0;
+ s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -446,7 +461,10 @@ static ssize_t wakeup_total_time_ms_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_total_time_ms);
@@ -454,7 +472,7 @@ static DEVICE_ATTR_RO(wakeup_total_time_ms);
static ssize_t wakeup_max_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- s64 msec = 0;
+ s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -463,7 +481,10 @@ static ssize_t wakeup_max_time_ms_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_max_time_ms);
@@ -472,7 +493,7 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- s64 msec = 0;
+ s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -481,7 +502,10 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_last_time_ms);
@@ -491,7 +515,7 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- s64 msec = 0;
+ s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -500,18 +524,36 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
-#endif /* CONFIG_PM_SLEEP */
+
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ if (dev->power.wakeup && dev->power.wakeup->dev)
+ return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
+ return 0;
+}
+
+#else /* CONFIG_PM_SLEEP */
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ return 0;
+}
+#endif
#ifdef CONFIG_PM_ADVANCED_DEBUG
static ssize_t runtime_usage_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
+ return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static DEVICE_ATTR_RO(runtime_usage);
@@ -519,21 +561,26 @@ static ssize_t runtime_active_kids_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev->power.ignore_children ?
- 0 : atomic_read(&dev->power.child_count));
+ return sysfs_emit(buf, "%d\n", dev->power.ignore_children ?
+ 0 : atomic_read(&dev->power.child_count));
}
static DEVICE_ATTR_RO(runtime_active_kids);
static ssize_t runtime_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (dev->power.disable_depth && (dev->power.runtime_auto == false))
- return sprintf(buf, "disabled & forbidden\n");
- if (dev->power.disable_depth)
- return sprintf(buf, "disabled\n");
- if (dev->power.runtime_auto == false)
- return sprintf(buf, "forbidden\n");
- return sprintf(buf, "enabled\n");
+ const char *output;
+
+ if (dev->power.disable_depth && !dev->power.runtime_auto)
+ output = "disabled & forbidden";
+ else if (dev->power.disable_depth)
+ output = "disabled";
+ else if (!dev->power.runtime_auto)
+ output = "forbidden";
+ else
+ output = "enabled";
+
+ return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(runtime_enabled);
@@ -541,9 +588,9 @@ static DEVICE_ATTR_RO(runtime_enabled);
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n",
- device_async_suspend_enabled(dev) ?
- _enabled : _disabled);
+ return sysfs_emit(buf, "%s\n",
+ device_async_suspend_enabled(dev) ?
+ _enabled : _disabled);
}
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
@@ -564,15 +611,9 @@ static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute *power_attrs[] = {
-#ifdef CONFIG_PM_ADVANCED_DEBUG
-#ifdef CONFIG_PM_SLEEP
+#if defined(CONFIG_PM_ADVANCED_DEBUG) && defined(CONFIG_PM_SLEEP)
&dev_attr_async.attr,
#endif
- &dev_attr_runtime_status.attr,
- &dev_attr_runtime_usage.attr,
- &dev_attr_runtime_active_kids.attr,
- &dev_attr_runtime_enabled.attr,
-#endif /* CONFIG_PM_ADVANCED_DEBUG */
NULL,
};
static const struct attribute_group pm_attr_group = {
@@ -603,13 +644,16 @@ static const struct attribute_group pm_wakeup_attr_group = {
};
static struct attribute *runtime_attrs[] = {
-#ifndef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_runtime_status.attr,
-#endif
&dev_attr_control.attr,
&dev_attr_runtime_suspended_time.attr,
&dev_attr_runtime_active_time.attr,
&dev_attr_autosuspend_delay_ms.attr,
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
@@ -648,11 +692,15 @@ int dpm_sysfs_add(struct device *dev)
{
int rc;
+ /* No need to create PM sysfs if explicitly disabled. */
+ if (device_pm_not_required(dev))
+ return 0;
+
rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
if (rc)
return rc;
- if (pm_runtime_callbacks_present(dev)) {
+ if (!pm_runtime_has_no_callbacks(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
if (rc)
goto err_out;
@@ -668,8 +716,13 @@ int dpm_sysfs_add(struct device *dev)
if (rc)
goto err_wakeup;
}
+ rc = pm_wakeup_source_sysfs_add(dev);
+ if (rc)
+ goto err_latency;
return 0;
+ err_latency:
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
err_wakeup:
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
err_runtime:
@@ -679,14 +732,59 @@ int dpm_sysfs_add(struct device *dev)
return rc;
}
+int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
+{
+ int rc;
+
+ if (device_pm_not_required(dev))
+ return 0;
+
+ rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid);
+ if (rc)
+ return rc;
+
+ if (!pm_runtime_has_no_callbacks(dev)) {
+ rc = sysfs_group_change_owner(
+ &dev->kobj, &pm_runtime_attr_group, kuid, kgid);
+ if (rc)
+ return rc;
+ }
+
+ if (device_can_wakeup(dev)) {
+ rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group,
+ kuid, kgid);
+ if (rc)
+ return rc;
+
+ rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid);
+ if (rc)
+ return rc;
+ }
+
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_group_change_owner(
+ &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid,
+ kgid);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
int wakeup_sysfs_add(struct device *dev)
{
- return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+ int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+
+ if (!ret)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
+ return ret;
}
void wakeup_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
int pm_qos_sysfs_add_resume_latency(struct device *dev)
@@ -727,6 +825,8 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+ if (device_pm_not_required(dev))
+ return;
sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index b11f47a1e819..d8da7195bb00 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/trace.c
*
@@ -6,11 +7,13 @@
* Trace facility for suspend/resume problems, when none of the
* devices may be working.
*/
+#define pr_fmt(fmt) "PM: " fmt
#include <linux/pm-trace.h>
#include <linux/export.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
+#include <linux/init.h>
#include <linux/mc146818rtc.h>
@@ -117,7 +120,11 @@ static unsigned int read_magic_time(void)
struct rtc_time time;
unsigned int val;
- mc146818_get_time(&time);
+ if (mc146818_get_time(&time, 1000) < 0) {
+ pr_err("Unable to read current time from RTC\n");
+ return 0;
+ }
+
pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
val = time.tm_year; /* 100 years */
if (val > 100)
@@ -163,6 +170,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
const char *file = *(const char **)(tracedata + 2);
unsigned int user_hash_value, file_hash_value;
+ if (!x86_platform.legacy.rtc)
+ return;
+
user_hash_value = user % USERHASH;
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -228,10 +238,8 @@ int show_trace_dev_match(char *buf, size_t size)
unsigned int hash = hash_string(DEVSEED, dev_name(dev),
DEVHASH);
if (hash == value) {
- int len = snprintf(buf, size, "%s\n",
+ int len = scnprintf(buf, size, "%s\n",
dev_driver_string(dev));
- if (len > size)
- len = size;
buf += len;
ret += len;
size -= len;
@@ -263,18 +271,24 @@ static struct notifier_block pm_trace_nb = {
.notifier_call = pm_trace_notify,
};
-static int early_resume_init(void)
+static int __init early_resume_init(void)
{
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;
}
-static int late_resume_init(void)
+static int __init late_resume_init(void)
{
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
user = val % USERHASH;
val = val / USERHASH;
file = val % FILEHASH;
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index b8fa5c0f2d13..8aa28c08b289 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -1,16 +1,5 @@
-/*
- * wakeirq.c - Device wakeirq helper functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
+// SPDX-License-Identifier: GPL-2.0
+/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -23,14 +12,11 @@
/**
* dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
* @dev: Device entry
- * @irq: Device wake-up capable interrupt
* @wirq: Wake irq specific data
*
- * Internal function to attach either a device IO interrupt or a
- * dedicated wake-up interrupt as a wake IRQ.
+ * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
*/
-static int dev_pm_attach_wake_irq(struct device *dev, int irq,
- struct wake_irq *wirq)
+static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
{
unsigned long flags;
@@ -76,7 +62,7 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
wirq->dev = dev;
wirq->irq = irq;
- err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ err = dev_pm_attach_wake_irq(dev, wirq);
if (err)
kfree(wirq);
@@ -117,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+static void devm_pm_clear_wake_irq(void *dev)
+{
+ dev_pm_clear_wake_irq(dev);
+}
+
+/**
+ * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ, same as dev_pm_set_wake_irq(),
+ * but the wake IRQ is automatically cleared on driver detach.
+ */
+int devm_pm_set_wake_irq(struct device *dev, int irq)
+{
+ int ret;
+
+ ret = dev_pm_set_wake_irq(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
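A hypothetical platform-driver probe using the new devres variant (invented name; assumes the device's IO interrupt doubles as its wake line):

	#include <linux/platform_device.h>
	#include <linux/pm_wakeirq.h>
	#include <linux/pm_wakeup.h>

	static int my_wake_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		device_init_wakeup(&pdev->dev, true);
		/* Cleared automatically by devres on driver detach. */
		return devm_pm_set_wake_irq(&pdev->dev, irq);
	}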
+
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt
@@ -156,24 +168,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
return IRQ_HANDLED;
}
-/**
- * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
- * @dev: Device entry
- * @irq: Device wake-up interrupt
- *
- * Unless your hardware has separate wake-up interrupts in addition
- * to the device IO interrupts, you don't need this.
- *
- * Sets up a threaded interrupt handler for a device that has
- * a dedicated wake-up interrupt in addition to the device IO
- * interrupt.
- *
- * The interrupt starts disabled, and needs to be managed for
- * the device by the bus code or the device driver using
- * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
- * functions.
- */
-int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
struct wake_irq *wirq;
int err;
@@ -193,7 +188,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
wirq->dev = dev;
wirq->irq = irq;
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
@@ -203,15 +197,16 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
* so we use a threaded irq.
*/
err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
- IRQF_ONESHOT, wirq->name, wirq);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ wirq->name, wirq);
if (err)
goto err_free_name;
- err = dev_pm_attach_wake_irq(dev, irq, wirq);
+ err = dev_pm_attach_wake_irq(dev, wirq);
if (err)
goto err_free_irq;
- wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
return err;
@@ -224,45 +219,45 @@ err_free:
return err;
}
-EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
/**
- * dev_pm_enable_wake_irq - Enable device wake-up interrupt
- * @dev: Device
+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
*
- * Optionally called from the bus code or the device driver for
- * runtime_resume() to override the PM runtime core managed wake-up
- * interrupt handling to enable the wake-up interrupt.
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
*
- * Note that for runtime_suspend()) the wake-up interrupts
- * should be unconditionally enabled unlike for suspend()
- * that is conditional.
+ * Sets up a threaded interrupt handler for a device that has
+ * a dedicated wake-up interrupt in addition to the device IO
+ * interrupt.
*/
-void dev_pm_enable_wake_irq(struct device *dev)
+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
- struct wake_irq *wirq = dev->power.wakeirq;
-
- if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
- enable_irq(wirq->irq);
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
-EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
/**
- * dev_pm_disable_wake_irq - Disable device wake-up interrupt
- * @dev: Device
+ * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
+ * with reverse enable ordering
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
*
- * Optionally called from the bus code or the device driver for
- * runtime_suspend() to override the PM runtime core managed wake-up
- * interrupt handling to disable the wake-up interrupt.
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has a dedicated
+ * wake-up interrupt in addition to the device IO interrupt. It sets
+ * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
+ * to enable dedicated wake-up interrupt after running the runtime suspend
+ * callback for @dev.
*/
-void dev_pm_disable_wake_irq(struct device *dev)
+int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
- struct wake_irq *wirq = dev->power.wakeirq;
-
- if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
- disable_irq_nosync(wirq->irq);
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
-EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
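A hypothetical probe fragment for hardware whose dedicated wake line may only be unmasked after ->runtime_suspend() has quiesced it (invented name):

	#include <linux/pm_wakeirq.h>

	static int my_wake_setup(struct device *dev, int wakeirq)
	{
		/* enable_irq() is deferred to rpm_suspend() by the
		 * WAKE_IRQ_DEDICATED_REVERSE flag this variant sets. */
		return dev_pm_set_dedicated_wake_irq_reverse(dev, wakeirq);
	}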
/**
* dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
@@ -271,7 +266,7 @@ EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
*
* Enables wakeirq conditionally. We need to enable wake-up interrupt
* lazily on the first rpm_suspend(). This is needed as the consumer device
- * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
* otherwise try to disable already disabled wakeirq. The wake-up interrupt
* starts disabled with IRQ_NOAUTOEN set.
*
@@ -283,7 +278,7 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
@@ -296,25 +291,58 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
return;
enable:
- enable_irq(wirq->irq);
+ if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
+ enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
}
/**
* dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
* @dev: Device
+ * @cond_disable: if set, leave the IRQ enabled when WAKE_IRQ_DEDICATED_REVERSE is set
*
* Disables wake-up interrupt conditionally based on status.
* Should be only called from rpm_suspend() and rpm_resume() path.
*/
-void dev_pm_disable_wake_irq_check(struct device *dev)
+void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
- if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+ if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
+ wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
disable_irq_nosync(wirq->irq);
+ }
+}
+
+/**
+ * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
+ * @dev: Device using the wake IRQ
+ *
+ * Enable the wake IRQ conditionally based on status; mainly used when the
+ * wake IRQ is to be enabled only after running ->runtime_suspend(), as
+ * requested via WAKE_IRQ_DEDICATED_REVERSE.
+ *
+ * Should be only called from rpm_suspend() path.
+ */
+void dev_pm_enable_wake_irq_complete(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
+ enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
}
/**
@@ -331,7 +359,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
if (device_may_wakeup(wirq->dev)) {
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
- !pm_runtime_status_suspended(wirq->dev))
+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
enable_irq(wirq->irq);
enable_irq_wake(wirq->irq);
@@ -354,7 +382,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
disable_irq_wake(wirq->irq);
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
- !pm_runtime_status_suspended(wirq->dev))
+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
disable_irq_nosync(wirq->irq);
}
}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5fa1898755a3..1e1a0e7eeac5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/wakeup.c - System wakeup events framework
*
* Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
*/
+#define pr_fmt(fmt) "PM: " fmt
#include <linux/device.h>
#include <linux/slab.h>
@@ -19,11 +19,9 @@
#include "power.h"
-#ifndef CONFIG_SUSPEND
-suspend_state_t pm_suspend_target_state;
-#define pm_suspend_target_state (PM_SUSPEND_ON)
-#endif
-
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ srcu_read_lock_held(&wakeup_srcu))
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
@@ -31,7 +29,8 @@ suspend_state_t pm_suspend_target_state;
bool events_check_enabled __read_mostly;
/* First wakeup IRQ seen by the kernel in the last cycle. */
-unsigned int pm_wakeup_irq __read_mostly;
+static unsigned int wakeup_irq[2] __read_mostly;
+static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;
@@ -72,56 +71,41 @@ static struct wakeup_source deleted_ws = {
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
};
-/**
- * wakeup_source_prepare - Prepare a new wakeup source for initialization.
- * @ws: Wakeup source to prepare.
- * @name: Pointer to the name of the new wakeup source.
- *
- * Callers must ensure that the @name string won't be freed when @ws is still in
- * use.
- */
-void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
-{
- if (ws) {
- memset(ws, 0, sizeof(*ws));
- ws->name = name;
- }
-}
-EXPORT_SYMBOL_GPL(wakeup_source_prepare);
+static DEFINE_IDA(wakeup_ida);
/**
* wakeup_source_create - Create a struct wakeup_source object.
* @name: Name of the new wakeup source.
*/
-struct wakeup_source *wakeup_source_create(const char *name)
+static struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
+ const char *ws_name;
+ int id;
- ws = kmalloc(sizeof(*ws), GFP_KERNEL);
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
if (!ws)
- return NULL;
+ goto err_ws;
- wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL);
- return ws;
-}
-EXPORT_SYMBOL_GPL(wakeup_source_create);
+ ws_name = kstrdup_const(name, GFP_KERNEL);
+ if (!ws_name)
+ goto err_name;
+ ws->name = ws_name;
-/**
- * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
- * @ws: Wakeup source to prepare for destruction.
- *
- * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
- * be run in parallel with this function for the same wakeup source object.
- */
-void wakeup_source_drop(struct wakeup_source *ws)
-{
- if (!ws)
- return;
+ id = ida_alloc(&wakeup_ida, GFP_KERNEL);
+ if (id < 0)
+ goto err_id;
+ ws->id = id;
- del_timer_sync(&ws->timer);
- __pm_relax(ws);
+ return ws;
+
+err_id:
+ kfree_const(ws->name);
+err_name:
+ kfree(ws);
+err_ws:
+ return NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_drop);
/*
* Record wakeup_source statistics being deleted into a dummy wakeup_source.
@@ -151,29 +135,34 @@ static void wakeup_source_record(struct wakeup_source *ws)
spin_unlock_irqrestore(&deleted_ws.lock, flags);
}
+static void wakeup_source_free(struct wakeup_source *ws)
+{
+ ida_free(&wakeup_ida, ws->id);
+ kfree_const(ws->name);
+ kfree(ws);
+}
+
/**
* wakeup_source_destroy - Destroy a struct wakeup_source object.
* @ws: Wakeup source to destroy.
*
* Use only for wakeup source objects created with wakeup_source_create().
*/
-void wakeup_source_destroy(struct wakeup_source *ws)
+static void wakeup_source_destroy(struct wakeup_source *ws)
{
if (!ws)
return;
- wakeup_source_drop(ws);
+ __pm_relax(ws);
wakeup_source_record(ws);
- kfree_const(ws->name);
- kfree(ws);
+ wakeup_source_free(ws);
}
-EXPORT_SYMBOL_GPL(wakeup_source_destroy);
/**
* wakeup_source_add - Add given object to the list of wakeup sources.
* @ws: Wakeup source object to add to the list.
*/
-void wakeup_source_add(struct wakeup_source *ws)
+static void wakeup_source_add(struct wakeup_source *ws)
{
unsigned long flags;
@@ -188,38 +177,52 @@ void wakeup_source_add(struct wakeup_source *ws)
list_add_rcu(&ws->entry, &wakeup_sources);
raw_spin_unlock_irqrestore(&events_lock, flags);
}
-EXPORT_SYMBOL_GPL(wakeup_source_add);
/**
* wakeup_source_remove - Remove given object from the wakeup sources list.
* @ws: Wakeup source object to remove from the list.
*/
-void wakeup_source_remove(struct wakeup_source *ws)
+static void wakeup_source_remove(struct wakeup_source *ws)
{
unsigned long flags;
if (WARN_ON(!ws))
return;
+ /*
+ * After shutting down the timer, wakeup_source_activate() will warn if
+ * the given wakeup source is passed to it.
+ */
+ timer_shutdown_sync(&ws->timer);
+
raw_spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
raw_spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
}
-EXPORT_SYMBOL_GPL(wakeup_source_remove);
/**
* wakeup_source_register - Create wakeup source and add it to the list.
+ * @dev: Device this wakeup source is associated with (or NULL if virtual).
* @name: Name of the wakeup source to register.
*/
-struct wakeup_source *wakeup_source_register(const char *name)
+struct wakeup_source *wakeup_source_register(struct device *dev,
+ const char *name)
{
struct wakeup_source *ws;
+ int ret;
ws = wakeup_source_create(name);
- if (ws)
+ if (ws) {
+ if (!dev || device_is_registered(dev)) {
+ ret = wakeup_source_sysfs_add(dev, ws);
+ if (ret) {
+ wakeup_source_free(ws);
+ return NULL;
+ }
+ }
wakeup_source_add(ws);
-
+ }
return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);
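Usage sketch with the new signature: a virtual wakeup source (NULL
device) holding off suspend while an event is handled; the name
"foo_events" is illustrative:

	struct wakeup_source *ws;

	ws = wakeup_source_register(NULL, "foo_events");
	if (!ws)
		return -ENOMEM;

	__pm_stay_awake(ws);		/* event pending, hold off suspend */
	/* ... handle the event ... */
	__pm_relax(ws);			/* done, suspend may proceed */

	wakeup_source_unregister(ws);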
@@ -232,12 +235,69 @@ void wakeup_source_unregister(struct wakeup_source *ws)
{
if (ws) {
wakeup_source_remove(ws);
+ if (ws->dev)
+ wakeup_source_sysfs_remove(ws);
+
wakeup_source_destroy(ws);
}
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
/**
+ * wakeup_sources_read_lock - Lock wakeup source list for read.
+ *
+ * Returns an index for the SRCU read-side critical section on wakeup_srcu.
+ * This index must be passed to the matching wakeup_sources_read_unlock().
+ */
+int wakeup_sources_read_lock(void)
+{
+ return srcu_read_lock(&wakeup_srcu);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
+
+/**
+ * wakeup_sources_read_unlock - Unlock wakeup source list.
+ * @idx: return value from corresponding wakeup_sources_read_lock()
+ */
+void wakeup_sources_read_unlock(int idx)
+{
+ srcu_read_unlock(&wakeup_srcu, idx);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
+
+/**
+ * wakeup_sources_walk_start - Begin a walk on wakeup source list
+ *
+ * Returns the first object in the list of wakeup sources.
+ *
+ * Note that to be safe, the wakeup sources list must be locked by calling
+ * wakeup_sources_read_lock() before this.
+ */
+struct wakeup_source *wakeup_sources_walk_start(void)
+{
+ struct list_head *ws_head = &wakeup_sources;
+
+ return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
+
+/**
+ * wakeup_sources_walk_next - Get next wakeup source from the list
+ * @ws: Previous wakeup source object
+ *
+ * Note that to be safe, the wakeup sources list must be locked by calling
+ * wakeup_sources_read_lock() before this.
+ */
+struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
+{
+ struct list_head *ws_head = &wakeup_sources;
+
+ return list_next_or_null_rcu(ws_head, &ws->entry,
+ struct wakeup_source, entry);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
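Taken together, a walk over the list looks roughly like this (the
message printed is illustrative):

	struct wakeup_source *ws;
	int idx;

	idx = wakeup_sources_read_lock();
	for (ws = wakeup_sources_walk_start(); ws;
	     ws = wakeup_sources_walk_next(ws))
		pr_info("wakeup source: %s\n", ws->name);
	wakeup_sources_read_unlock(idx);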
+
+/**
* device_wakeup_attach - Attach a wakeup source object to a device object.
* @dev: Device to handle.
* @ws: Wakeup source object to attach to @dev.
@@ -272,10 +332,10 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
- if (pm_suspend_target_state != PM_SUSPEND_ON)
+ if (pm_sleep_transition_in_progress())
dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
- ws = wakeup_source_register(dev_name(dev));
+ ws = wakeup_source_register(dev, dev_name(dev));
if (!ws)
return -ENOMEM;
@@ -331,9 +391,9 @@ void device_wakeup_detach_irq(struct device *dev)
}
/**
- * device_wakeup_arm_wake_irqs(void)
+ * device_wakeup_arm_wake_irqs - Arm all device wake-up IRQs.
*
- * Itereates over the list of device wakeirqs to arm them.
+ * Iterates over the list of device wakeirqs to arm them.
*/
void device_wakeup_arm_wake_irqs(void)
{
@@ -341,15 +401,15 @@ void device_wakeup_arm_wake_irqs(void)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
dev_pm_arm_wake_irq(ws->wakeirq);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
- * device_wakeup_disarm_wake_irqs(void)
+ * device_wakeup_disarm_wake_irqs - Disarm all device wake-up IRQs.
*
- * Itereates over the list of device wakeirqs to disarm them.
+ * Iterates over the list of device wakeirqs to disarm them.
*/
void device_wakeup_disarm_wake_irqs(void)
{
@@ -357,7 +417,7 @@ void device_wakeup_disarm_wake_irqs(void)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
dev_pm_disarm_wake_irq(ws->wakeirq);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -386,16 +446,15 @@ static struct wakeup_source *device_wakeup_detach(struct device *dev)
* Detach the @dev's wakeup source object from it, unregister this wakeup source
* object and destroy it.
*/
-int device_wakeup_disable(struct device *dev)
+void device_wakeup_disable(struct device *dev)
{
struct wakeup_source *ws;
if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
+ return;
ws = device_wakeup_detach(dev);
wakeup_source_unregister(ws);
- return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);
@@ -431,54 +490,29 @@ void device_set_wakeup_capable(struct device *dev, bool capable)
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
/**
- * device_init_wakeup - Device wakeup initialization.
- * @dev: Device to handle.
- * @enable: Whether or not to enable @dev as a wakeup device.
- *
- * By default, most devices should leave wakeup disabled. The exceptions are
- * devices that everyone expects to be wakeup sources: keyboards, power buttons,
- * possibly network interfaces, etc. Also, devices that don't generate their
- * own wakeup requests but merely forward requests from one bus to another
- * (like PCI bridges) should have wakeup enabled by default.
- */
-int device_init_wakeup(struct device *dev, bool enable)
-{
- int ret = 0;
-
- if (!dev)
- return -EINVAL;
-
- if (enable) {
- device_set_wakeup_capable(dev, true);
- ret = device_wakeup_enable(dev);
- } else {
- device_wakeup_disable(dev);
- device_set_wakeup_capable(dev, false);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(device_init_wakeup);
-
-/**
* device_set_wakeup_enable - Enable or disable a device to wake up the system.
* @dev: Device to handle.
+ * @enable: enable/disable flag
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+ if (enable)
+ return device_wakeup_enable(dev);
+
+ device_wakeup_disable(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
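Caller-side sketch reflecting the new semantics: only enabling can
fail, disabling cannot:

	int err;

	err = device_set_wakeup_enable(dev, true);	/* may fail */
	if (err)
		dev_warn(dev, "cannot enable wakeup: %d\n", err);

	device_set_wakeup_enable(dev, false);		/* never fails now */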
/**
- * wakeup_source_not_registered - validate the given wakeup source.
+ * wakeup_source_not_usable - validate the given wakeup source.
* @ws: Wakeup source to be validated.
*/
-static bool wakeup_source_not_registered(struct wakeup_source *ws)
+static bool wakeup_source_not_usable(struct wakeup_source *ws)
{
/*
- * Use timer struct to check if the given source is initialized
- * by wakeup_source_add.
+ * Use the timer struct to check if the given wakeup source has been
+ * initialized by wakeup_source_add() and is not going away.
*/
return ws->timer.function != pm_wakeup_timer_fn;
}
@@ -512,19 +546,18 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
*/
/**
- * wakup_source_activate - Mark given wakeup source as active.
+ * wakeup_source_activate - Mark given wakeup source as active.
* @ws: Wakeup source to handle.
*
* Update the @ws' statistics and, if @ws has just been activated, notify the PM
- * core of the event by incrementing the counter of of wakeup events being
+ * core of the event by incrementing the counter of the wakeup events being
* processed.
*/
static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
- if (WARN_ONCE(wakeup_source_not_registered(ws),
- "unregistered wakeup source\n"))
+ if (WARN_ONCE(wakeup_source_not_usable(ws), "unusable wakeup source\n"))
return;
ws->active = true;
@@ -574,7 +607,7 @@ void __pm_stay_awake(struct wakeup_source *ws)
spin_lock_irqsave(&ws->lock, flags);
wakeup_source_report_event(ws, false);
- del_timer(&ws->timer);
+ timer_delete(&ws->timer);
ws->timer_expires = 0;
spin_unlock_irqrestore(&ws->lock, flags);
@@ -617,7 +650,7 @@ static inline void update_prevent_sleep_time(struct wakeup_source *ws,
#endif
/**
- * wakup_source_deactivate - Mark given wakeup source as inactive.
+ * wakeup_source_deactivate - Mark given wakeup source as inactive.
* @ws: Wakeup source to handle.
*
* Update the @ws' statistics and notify the PM core that the wakeup source has
@@ -654,7 +687,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
ws->max_time = duration;
ws->last_time = now;
- del_timer(&ws->timer);
+ timer_delete(&ws->timer);
ws->timer_expires = 0;
if (ws->autosleep_enabled)
@@ -662,7 +695,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
/*
* Increment the counter of registered wakeup events and decrement the
- * couter of wakeup events in progress simultaneously.
+ * counter of wakeup events in progress simultaneously.
*/
cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
trace_wakeup_source_deactivate(ws->name, cec);
@@ -716,7 +749,7 @@ EXPORT_SYMBOL_GPL(pm_relax);
/**
* pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
- * @data: Address of the wakeup source object associated with the event source.
+ * @t: Timer used to look up the wakeup source to handle.
*
 * Call wakeup_source_deactivate() for the wakeup source associated with @t
 * if it is currently active and its timer has not been canceled and
@@ -724,7 +757,7 @@ EXPORT_SYMBOL_GPL(pm_relax);
*/
static void pm_wakeup_timer_fn(struct timer_list *t)
{
- struct wakeup_source *ws = from_timer(ws, t, timer);
+ struct wakeup_source *ws = timer_container_of(ws, t, timer);
unsigned long flags;
spin_lock_irqsave(&ws->lock, flags);
@@ -783,7 +816,7 @@ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
/**
- * pm_wakeup_event - Notify the PM core of a wakeup event.
+ * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
* @dev: Device the wakeup event is related to.
* @msec: Anticipated event processing time (in milliseconds).
* @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
@@ -810,9 +843,9 @@ void pm_print_active_wakeup_sources(void)
struct wakeup_source *last_activity_ws = NULL;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
if (ws->active) {
- pr_debug("active wakeup source: %s\n", ws->name);
+ pm_pr_dbg("active wakeup source: %s\n", ws->name);
active = 1;
} else if (!active &&
(!last_activity_ws ||
@@ -823,7 +856,7 @@ void pm_print_active_wakeup_sources(void)
}
if (!active && last_activity_ws)
- pr_debug("last active wakeup source: %s\n",
+ pm_pr_dbg("last active wakeup source: %s\n",
last_activity_ws->name);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -853,12 +886,13 @@ bool pm_wakeup_pending(void)
raw_spin_unlock_irqrestore(&events_lock, flags);
if (ret) {
- pr_debug("PM: Wakeup pending, aborting suspend\n");
+ pm_pr_dbg("Wakeup pending, aborting suspend\n");
pm_print_active_wakeup_sources();
}
return ret || atomic_read(&pm_abort_suspend) > 0;
}
+EXPORT_SYMBOL_GPL(pm_wakeup_pending);
void pm_system_wakeup(void)
{
@@ -869,22 +903,50 @@ EXPORT_SYMBOL_GPL(pm_system_wakeup);
void pm_system_cancel_wakeup(void)
{
- atomic_dec(&pm_abort_suspend);
+ atomic_dec_if_positive(&pm_abort_suspend);
}
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(unsigned int irq_number)
{
- pm_wakeup_irq = 0;
- if (reset)
+ raw_spin_lock_irq(&wakeup_irq_lock);
+
+ if (irq_number && wakeup_irq[0] == irq_number)
+ wakeup_irq[0] = wakeup_irq[1];
+ else
+ wakeup_irq[0] = 0;
+
+ wakeup_irq[1] = 0;
+
+ raw_spin_unlock_irq(&wakeup_irq_lock);
+
+ if (!irq_number)
atomic_set(&pm_abort_suspend, 0);
}
void pm_system_irq_wakeup(unsigned int irq_number)
{
- if (pm_wakeup_irq == 0) {
- pm_wakeup_irq = irq_number;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
+
+ if (wakeup_irq[0] == 0)
+ wakeup_irq[0] = irq_number;
+ else if (wakeup_irq[1] == 0)
+ wakeup_irq[1] = irq_number;
+ else
+ irq_number = 0;
+
+ pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number);
+
+ raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
+
+ if (irq_number)
pm_system_wakeup();
- }
+}
+
+unsigned int pm_wakeup_irq(void)
+{
+ return wakeup_irq[0];
}
/**
@@ -952,7 +1014,7 @@ bool pm_save_wakeup_count(unsigned int count)
#ifdef CONFIG_PM_AUTOSLEEP
/**
* pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
- * @enabled: Whether to set or to clear the autosleep_enabled flags.
+ * @set: Whether to set or to clear the autosleep_enabled flags.
*/
void pm_wakep_autosleep_enabled(bool set)
{
@@ -961,7 +1023,7 @@ void pm_wakep_autosleep_enabled(bool set)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
ws->autosleep_enabled = set;
@@ -978,8 +1040,6 @@ void pm_wakep_autosleep_enabled(bool set)
}
#endif /* CONFIG_PM_AUTOSLEEP */
-static struct dentry *wakeup_sources_stats_dentry;
-
/**
* print_wakeup_source_stats - Print wakeup source statistics information.
* @m: seq_file to print the statistics into.
@@ -1042,7 +1102,7 @@ static void *wakeup_sources_stats_seq_start(struct seq_file *m,
}
*srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
if (n-- <= 0)
return ws;
}
@@ -1063,6 +1123,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m,
break;
}
+ if (!next_ws)
+ print_wakeup_source_stats(m, &deleted_ws);
+
return next_ws;
}
@@ -1109,8 +1172,8 @@ static const struct file_operations wakeup_sources_stats_fops = {
static int __init wakeup_sources_debugfs_init(void)
{
- wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
- S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
+ debugfs_create_file("wakeup_sources", 0444, NULL, NULL,
+ &wakeup_sources_stats_fops);
return 0;
}
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
new file mode 100644
index 000000000000..3ffd427248e8
--- /dev/null
+++ b/drivers/base/power/wakeup_stats.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wakeup statistics in sysfs
+ *
+ * Copyright (c) 2019 Linux Foundation
+ * Copyright (c) 2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2019 Google Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+
+#include "power.h"
+
+static struct class *wakeup_class;
+
+#define wakeup_attr(_name) \
+static ssize_t _name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wakeup_source *ws = dev_get_drvdata(dev); \
+ \
+ return sysfs_emit(buf, "%lu\n", ws->_name); \
+} \
+static DEVICE_ATTR_RO(_name)
+
+wakeup_attr(active_count);
+wakeup_attr(event_count);
+wakeup_attr(wakeup_count);
+wakeup_attr(expire_count);
+wakeup_attr(relax_count);
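For reference, wakeup_attr(active_count) above expands roughly to:

	static ssize_t active_count_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
	{
		struct wakeup_source *ws = dev_get_drvdata(dev);

		return sysfs_emit(buf, "%lu\n", ws->active_count);
	}
	static DEVICE_ATTR_RO(active_count);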
+
+static ssize_t active_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time =
+ ws->active ? ktime_sub(ktime_get(), ws->last_time) : 0;
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(active_time));
+}
+static DEVICE_ATTR_RO(active_time_ms);
+
+static ssize_t total_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time;
+ ktime_t total_time = ws->total_time;
+
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ total_time = ktime_add(total_time, active_time);
+ }
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(total_time));
+}
+static DEVICE_ATTR_RO(total_time_ms);
+
+static ssize_t max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time;
+ ktime_t max_time = ws->max_time;
+
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ if (active_time > max_time)
+ max_time = active_time;
+ }
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(max_time));
+}
+static DEVICE_ATTR_RO(max_time_ms);
+
+static ssize_t last_change_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(ws->last_time));
+}
+static DEVICE_ATTR_RO(last_change_ms);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ws->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t prevent_suspend_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t prevent_sleep_time = ws->prevent_sleep_time;
+
+ if (ws->active && ws->autosleep_enabled) {
+ prevent_sleep_time = ktime_add(prevent_sleep_time,
+ ktime_sub(ktime_get(), ws->start_prevent_time));
+ }
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(prevent_sleep_time));
+}
+static DEVICE_ATTR_RO(prevent_suspend_time_ms);
+
+static struct attribute *wakeup_source_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_active_count.attr,
+ &dev_attr_event_count.attr,
+ &dev_attr_wakeup_count.attr,
+ &dev_attr_expire_count.attr,
+ &dev_attr_relax_count.attr,
+ &dev_attr_active_time_ms.attr,
+ &dev_attr_total_time_ms.attr,
+ &dev_attr_max_time_ms.attr,
+ &dev_attr_last_change_ms.attr,
+ &dev_attr_prevent_suspend_time_ms.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wakeup_source);
+
+static void device_create_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct device *wakeup_source_device_create(struct device *parent,
+ struct wakeup_source *ws)
+{
+ struct device *dev = NULL;
+ int retval;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ device_initialize(dev);
+ dev->devt = MKDEV(0, 0);
+ dev->class = wakeup_class;
+ dev->parent = parent;
+ dev->groups = wakeup_source_groups;
+ dev->release = device_create_release;
+ dev_set_drvdata(dev, ws);
+ device_set_pm_not_required(dev);
+
+ retval = dev_set_name(dev, "wakeup%d", ws->id);
+ if (retval)
+ goto error;
+
+ retval = device_add(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+
+/**
+ * wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs.
+ * @parent: Device given wakeup source is associated with (or NULL if virtual).
+ * @ws: Wakeup source to be added in sysfs.
+ */
+int wakeup_source_sysfs_add(struct device *parent, struct wakeup_source *ws)
+{
+ struct device *dev;
+
+ dev = wakeup_source_device_create(parent, ws);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ ws->dev = dev;
+
+ return 0;
+}
+
+/**
+ * pm_wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs
+ * for a device if they're missing.
+ * @parent: Device given wakeup source is associated with
+ */
+int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ if (!parent->power.wakeup || parent->power.wakeup->dev)
+ return 0;
+
+ return wakeup_source_sysfs_add(parent, parent->power.wakeup);
+}
+
+/**
+ * wakeup_source_sysfs_remove - Remove wakeup_source attributes from sysfs.
+ * @ws: Wakeup source to be removed from sysfs.
+ */
+void wakeup_source_sysfs_remove(struct wakeup_source *ws)
+{
+ device_unregister(ws->dev);
+}
+
+static int __init wakeup_sources_sysfs_init(void)
+{
+ wakeup_class = class_create("wakeup");
+
+ return PTR_ERR_OR_ZERO(wakeup_class);
+}
+postcore_initcall(wakeup_sources_sysfs_init);
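The net effect is one sysfs directory per wakeup source under the new
class (paths illustrative):

	/sys/class/wakeup/wakeup<id>/name
	/sys/class/wakeup/wakeup<id>/active_count
	/sys/class/wakeup/wakeup<id>/event_count
	/sys/class/wakeup/wakeup<id>/total_time_ms
	...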