author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2025-06-23 14:54:39 +0200
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2025-07-03 16:53:33 +0200
commit	ed18738fff025df2a424d3b21e895992e6cb230a (patch)
tree	37d3d074d6094ef31e67a4863608c2c615c70f02
parent	5e8be76a7c37b98876704cf211ac0ab674304f4f (diff)
PM: sleep: Make async resume handle consumers like children
Avoid starting "async" resume processing upfront for devices that have
suppliers and start "async" resume processing for a device's consumers
right after resuming the device itself.
Suggested-by: Saravana Kannan <saravanak@google.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Link: https://patch.msgid.link/3378088.aeNJFYEL58@rjwysocki.net
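As a reading aid, here is a condensed sketch of the two pieces the patch adds, lifted from the diff below. It is not standalone code: it relies on kernel-internal helpers (dpm_async_resume_children(), dpm_async_with_cleanup(), device_links_read_lock()) defined elsewhere in drivers/base/power/main.c and drivers/base/core.c.

/*
 * 1) A device only gets its async resume started upfront if it has
 *    neither a parent nor any suppliers.
 */
static bool dpm_root_device(struct device *dev)
{
	lockdep_assert_held(&dpm_list_mtx);

	return !dev->parent && list_empty(&dev->links.suppliers);
}

/*
 * 2) After a device has resumed, async processing is started for its
 *    children and, additionally, for its non-dormant consumers.
 */
static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
{
	struct device_link *link;
	int idx;

	dpm_async_resume_children(dev, func);

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_async_with_cleanup(link->consumer, func);

	device_links_read_unlock(idx);
}

Walking the consumers under device_links_read_lock() mirrors how children are walked via device_for_each_child(), so consumers become ordered after their suppliers in the same way children are ordered after their parents, which is what the commit title refers to.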
-rw-r--r--	drivers/base/power/main.c	36
1 file changed, 28 insertions, 8 deletions
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e6fc52b85295..d7ef5048a452 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -647,14 +647,27 @@ static void dpm_async_resume_children(struct device *dev, async_func_t func)
 	/*
 	 * Start processing "async" children of the device unless it's been
 	 * started already for them.
-	 *
-	 * This could have been done for the device's "async" consumers too, but
-	 * they either need to wait for their parents or the processing has
-	 * already started for them after their parents were processed.
 	 */
 	device_for_each_child(dev, func, dpm_async_with_cleanup);
 }
 
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+	struct device_link *link;
+	int idx;
+
+	dpm_async_resume_children(dev, func);
+
+	idx = device_links_read_lock();
+
+	/* Start processing the device's "async" consumers. */
+	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+			dpm_async_with_cleanup(link->consumer, func);
+
+	device_links_read_unlock(idx);
+}
+
 static void dpm_clear_async_state(struct device *dev)
 {
 	reinit_completion(&dev->power.completion);
@@ -663,7 +676,14 @@ static void dpm_clear_async_state(struct device *dev)
 
 static bool dpm_root_device(struct device *dev)
 {
-	return !dev->parent;
+	lockdep_assert_held(&dpm_list_mtx);
+
+	/*
+	 * Since this function is required to run under dpm_list_mtx, the
+	 * list_empty() below will only return true if the device's list of
+	 * consumers is actually empty before calling it.
+	 */
+	return !dev->parent && list_empty(&dev->links.suppliers);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie);
@@ -752,7 +772,7 @@ Out:
 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume_noirq);
+	dpm_async_resume_subordinate(dev, async_resume_noirq);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -895,7 +915,7 @@ Out:
 		pm_dev_err(dev, state, async ? " async early" : " early", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume_early);
+	dpm_async_resume_subordinate(dev, async_resume_early);
 }
 
 static void async_resume_early(void *data, async_cookie_t cookie)
@@ -1071,7 +1091,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 		pm_dev_err(dev, state, async ? " async" : "", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume);
+	dpm_async_resume_subordinate(dev, async_resume);
 }
 
 static void async_resume(void *data, async_cookie_t cookie)