Diffstat (limited to 'kernel/async.c')
-rw-r--r--	kernel/async.c	209
1 file changed, 119 insertions, 90 deletions
diff --git a/kernel/async.c b/kernel/async.c
index a893d6170944..4c3e6a44595f 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * async.c: Asynchronous function calls for boot performance
  *
  * (C) Copyright 2009 Intel Corporation
  * Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
  */
@@ -50,11 +46,12 @@ asynchronous and synchronous parts of the kernel.
 #include <linux/async.h>
 #include <linux/atomic.h>
-#include <linux/ktime.h>
 #include <linux/export.h>
-#include <linux/wait.h>
+#include <linux/ktime.h>
+#include <linux/pid.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/wait.h>
 #include <linux/workqueue.h>
 
 #include "workqueue_internal.h"
 
@@ -67,6 +64,7 @@ static async_cookie_t next_cookie = 1;
 static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
 static ASYNC_DOMAIN(async_dfl_domain);
 static DEFINE_SPINLOCK(async_lock);
+static struct workqueue_struct *async_wq;
 
 struct async_entry {
 	struct list_head	domain_list;
@@ -82,6 +80,12 @@ static DECLARE_WAIT_QUEUE_HEAD(async_done);
 
 static atomic_t entry_count;
 
+static long long microseconds_since(ktime_t start)
+{
+	ktime_t now = ktime_get();
+	return ktime_to_ns(ktime_sub(now, start)) >> 10;
+}
+
 static async_cookie_t lowest_in_progress(struct async_domain *domain)
 {
 	struct async_entry *first = NULL;
@@ -115,24 +119,18 @@ static void async_run_entry_fn(struct work_struct *work)
 	struct async_entry *entry =
 		container_of(work, struct async_entry, work);
 	unsigned long flags;
-	ktime_t uninitialized_var(calltime), delta, rettime;
+	ktime_t calltime;
 
 	/* 1) run (and print duration) */
-	if (initcall_debug && system_state < SYSTEM_RUNNING) {
-		pr_debug("calling %lli_%pF @ %i\n",
-			 (long long)entry->cookie,
-			 entry->func, task_pid_nr(current));
-		calltime = ktime_get();
-	}
+	pr_debug("calling %lli_%pS @ %i\n", (long long)entry->cookie,
+		 entry->func, task_pid_nr(current));
+	calltime = ktime_get();
+
 	entry->func(entry->data, entry->cookie);
-	if (initcall_debug && system_state < SYSTEM_RUNNING) {
-		rettime = ktime_get();
-		delta = ktime_sub(rettime, calltime);
-		pr_debug("initcall %lli_%pF returned 0 after %lld usecs\n",
-			 (long long)entry->cookie,
-			 entry->func,
-			 (long long)ktime_to_ns(delta) >> 10);
-	}
+
+	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
+		 (long long)entry->cookie, entry->func,
+		 microseconds_since(calltime));
 
 	/* 2) remove self from the pending queues */
 	spin_lock_irqsave(&async_lock, flags);
@@ -149,29 +147,14 @@ static void async_run_entry_fn(struct work_struct *work)
 	wake_up(&async_done);
 }
 
-static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain)
+static async_cookie_t __async_schedule_node_domain(async_func_t func,
+						   void *data, int node,
+						   struct async_domain *domain,
+						   struct async_entry *entry)
 {
-	struct async_entry *entry;
-	unsigned long flags;
 	async_cookie_t newcookie;
+	unsigned long flags;
 
-	/* allow irq-off callers */
-	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
-
-	/*
-	 * If we're out of memory or if there's too much work
-	 * pending already, we execute synchronously.
-	 */
-	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
-		kfree(entry);
-		spin_lock_irqsave(&async_lock, flags);
-		newcookie = next_cookie++;
-		spin_unlock_irqrestore(&async_lock, flags);
-
-		/* low on memory.. run synchronously */
-		func(data, newcookie);
-		return newcookie;
-	}
 	INIT_LIST_HEAD(&entry->domain_list);
 	INIT_LIST_HEAD(&entry->global_list);
 	INIT_WORK(&entry->work, async_run_entry_fn);
@@ -191,76 +174,116 @@ static async_cookie_t __async_schedule(async_func_t func, void *data, struct asy
 	atomic_inc(&entry_count);
 	spin_unlock_irqrestore(&async_lock, flags);
 
-	/* mark that this task has queued an async job, used by module init */
-	current->flags |= PF_USED_ASYNC;
-
 	/* schedule for execution */
-	queue_work(system_unbound_wq, &entry->work);
+	queue_work_node(node, async_wq, &entry->work);
 
 	return newcookie;
 }
 
 /**
- * async_schedule - schedule a function for asynchronous execution
+ * async_schedule_node_domain - NUMA specific version of async_schedule_domain
  * @func: function to execute asynchronously
  * @data: data pointer to pass to the function
+ * @node: NUMA node that we want to schedule this on or close to
+ * @domain: the domain
  *
  * Returns an async_cookie_t that may be used for checkpointing later.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally.
+ *
  * Note: This function may be called from atomic or non-atomic contexts.
+ *
+ * The node requested will be honored on a best effort basis. If the node
+ * has no CPUs associated with it then the work is distributed among all
+ * available CPUs.
  */
-async_cookie_t async_schedule(async_func_t func, void *data)
+async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
+					  int node, struct async_domain *domain)
 {
-	return __async_schedule(func, data, &async_dfl_domain);
+	struct async_entry *entry;
+	unsigned long flags;
+	async_cookie_t newcookie;
+
+	/* allow irq-off callers */
+	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
+
+	/*
+	 * If we're out of memory or if there's too much work
+	 * pending already, we execute synchronously.
+	 */
+	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+		kfree(entry);
+		spin_lock_irqsave(&async_lock, flags);
+		newcookie = next_cookie++;
+		spin_unlock_irqrestore(&async_lock, flags);
+
+		/* low on memory.. run synchronously */
+		func(data, newcookie);
+		return newcookie;
+	}
+
+	return __async_schedule_node_domain(func, data, node, domain, entry);
 }
-EXPORT_SYMBOL_GPL(async_schedule);
+EXPORT_SYMBOL_GPL(async_schedule_node_domain);
 
 /**
- * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
+ * async_schedule_node - NUMA specific version of async_schedule
  * @func: function to execute asynchronously
  * @data: data pointer to pass to the function
- * @domain: the domain
+ * @node: NUMA node that we want to schedule this on or close to
  *
  * Returns an async_cookie_t that may be used for checkpointing later.
- * @domain may be used in the async_synchronize_*_domain() functions to
- * wait within a certain synchronization domain rather than globally.  A
- * synchronization domain is specified via @domain.  Note: This function
- * may be called from atomic or non-atomic contexts.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ *
+ * The node requested will be honored on a best effort basis. If the node
+ * has no CPUs associated with it then the work is distributed among all
+ * available CPUs.
  */
-async_cookie_t async_schedule_domain(async_func_t func, void *data,
-				     struct async_domain *domain)
+async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
 {
-	return __async_schedule(func, data, domain);
+	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
 }
-EXPORT_SYMBOL_GPL(async_schedule_domain);
+EXPORT_SYMBOL_GPL(async_schedule_node);
 
 /**
- * async_synchronize_full - synchronize all asynchronous function calls
+ * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
  *
- * This function waits until all asynchronous function calls have been done.
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function.
+ *
+ * If the asynchronous execution of @func is scheduled successfully, return
+ * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
+ * that will run the function synchronously then.
  */
-void async_synchronize_full(void)
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
 {
-	async_synchronize_full_domain(NULL);
+	struct async_entry *entry;
+
+	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
+
+	/* Give up if there is no memory or too much work. */
+	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+		kfree(entry);
+		return false;
+	}
+
+	__async_schedule_node_domain(func, dev, dev_to_node(dev),
+				     &async_dfl_domain, entry);
+	return true;
 }
-EXPORT_SYMBOL_GPL(async_synchronize_full);
 
 /**
- * async_unregister_domain - ensure no more anonymous waiters on this domain
- * @domain: idle domain to flush out of any async_synchronize_full instances
- *
- * async_synchronize_{cookie|full}_domain() are not flushed since callers
- * of these routines should know the lifetime of @domain
+ * async_synchronize_full - synchronize all asynchronous function calls
  *
- * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
+ * This function waits until all asynchronous function calls have been done.
  */
-void async_unregister_domain(struct async_domain *domain)
+void async_synchronize_full(void)
 {
-	spin_lock_irq(&async_lock);
-	WARN_ON(!domain->registered || !list_empty(&domain->pending));
-	domain->registered = 0;
-	spin_unlock_irq(&async_lock);
+	async_synchronize_full_domain(NULL);
 }
-EXPORT_SYMBOL_GPL(async_unregister_domain);
+EXPORT_SYMBOL_GPL(async_synchronize_full);
 
 /**
  * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
@@ -286,23 +309,15 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
  */
 void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
 {
-	ktime_t uninitialized_var(starttime), delta, endtime;
+	ktime_t starttime;
 
-	if (initcall_debug && system_state < SYSTEM_RUNNING) {
-		pr_debug("async_waiting @ %i\n", task_pid_nr(current));
-		starttime = ktime_get();
-	}
+	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
+	starttime = ktime_get();
 
 	wait_event(async_done, lowest_in_progress(domain) >= cookie);
 
-	if (initcall_debug && system_state < SYSTEM_RUNNING) {
-		endtime = ktime_get();
-		delta = ktime_sub(endtime, starttime);
-
-		pr_debug("async_continuing @ %i after %lli usec\n",
-			 task_pid_nr(current),
-			 (long long)ktime_to_ns(delta) >> 10);
-	}
+	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
+		 microseconds_since(starttime));
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
@@ -331,3 +346,17 @@ bool current_is_async(void)
 	return worker && worker->current_func == async_run_entry_fn;
 }
 EXPORT_SYMBOL_GPL(current_is_async);
+
+void __init async_init(void)
+{
+	/*
+	 * Async can schedule a number of interdependent work items. However,
+	 * unbound workqueues can handle only upto min_active interdependent
+	 * work items. The default min_active of 8 isn't sufficient for async
+	 * and can lead to stalls. Let's use a dedicated workqueue with raised
+	 * min_active.
+	 */
+	async_wq = alloc_workqueue("async", WQ_UNBOUND, 0);
+	BUG_ON(!async_wq);
+	workqueue_set_min_active(async_wq, WQ_DFL_ACTIVE);
+}
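A note on the new microseconds_since() helper: like the open-coded timing it replaces, it converts a nanosecond delta to "usecs" with a right shift by 10, i.e. a division by 1024 rather than 1000 (presumably to avoid a 64-bit division). The printed figures therefore read about 2.3% low; as a worked example, a true millisecond is 1,000,000 ns, and 1,000,000 >> 10 = 976, so it prints as 976 usecs instead of 1000. That tolerance is fine for pr_debug() boot-timing output.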
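The pair async_schedule_node_domain()/async_schedule_node() added above are the NUMA-aware entry points; the node hint flows through to queue_work_node(). Below is a minimal caller sketch, not from this patch: the module scaffolding and the names probe_domain, slow_setup, and example_init are hypothetical.

#include <linux/async.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/topology.h>

/* Private domain: lets us wait for our own work without waiting globally. */
static ASYNC_DOMAIN(probe_domain);

static void slow_setup(void *data, async_cookie_t cookie)
{
	pr_info("setting up %s (cookie %llu)\n", (char *)data,
		(unsigned long long)cookie);
}

static int __init example_init(void)
{
	/* Node hint is best effort; a CPU-less node falls back to any CPU. */
	async_schedule_node_domain(slow_setup, "bank0", numa_node_id(),
				   &probe_domain);

	/* Blocks until everything queued in probe_domain has run. */
	async_synchronize_full_domain(&probe_domain);
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");

Using a private domain keeps the final synchronize from also waiting on unrelated async work elsewhere in the kernel.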

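async_schedule_dev_nocall() inverts the usual fallback: async_schedule_dev() runs the function synchronously itself when it cannot queue it, while the _nocall variant does nothing and returns false, leaving the decision to the caller. A sketch of the resulting caller-side pattern, with hypothetical helpers dev_resume_fn() and resume_one() that are not part of this patch:

#include <linux/async.h>
#include <linux/device.h>

static void dev_resume_fn(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	dev_dbg(dev, "async resume, cookie %llu\n",
		(unsigned long long)cookie);
	/* ... device-specific wakeup work would go here ... */
}

static void resume_one(struct device *dev)
{
	/*
	 * On allocation failure (or too much queued async work) the
	 * _nocall variant returns false without running dev_resume_fn(),
	 * so the synchronous fallback is explicit at the call site.
	 */
	if (!async_schedule_dev_nocall(dev_resume_fn, dev))
		dev_resume_fn(dev, 0);
}

Note also that the entry is allocated with GFP_KERNEL rather than GFP_ATOMIC, so unlike the other scheduling paths this one is for callers that may sleep.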