From 2d5f0764b5264d2954ba6e3deb04f4f5de8e4476 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 27 Apr 2015 17:58:38 +0800 Subject: workqueue: split apply_workqueue_attrs() into 3 stages Current apply_workqueue_attrs() includes pwqs-allocation and pwqs-installation, so when we batch multiple apply_workqueue_attrs()s as a transaction, we can't ensure the transaction must succeed or fail as a complete unit. To solve this, we split apply_workqueue_attrs() into three stages. The first stage does the preparation: allocating memory and pwqs. The second stage does the attrs-installation and pwqs-installation. The third stage frees the allocated memory and (old or unused) pwqs. As a result, batching multiple apply_workqueue_attrs()s can succeed or fail as a complete unit: 1) do the first stage for all the workqueues, 2) commit them all only when all of the above succeed. This patch is a preparation for the next patch ("Allow modifying low level unbound workqueue cpumask") which will batch multiple apply_workqueue_attrs()s. The patch introduces no functional change except for two minor adjustments: 1) free_unbound_pwq() for the error path is removed; we use the heavier put_pwq_unlocked() instead since the error path is rare. This adjustment simplifies the code. 2) the memory allocation is also moved under wq_pool_mutex. This is needed to avoid further splitting. tj: minor updates to comments. Suggested-by: Tejun Heo Cc: Christoph Lameter Cc: Kevin Hilman Cc: Lai Jiangshan Cc: Mike Galbraith Cc: Paul E. McKenney Cc: Tejun Heo Cc: Viresh Kumar Cc: Frederic Weisbecker Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 199 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 115 insertions(+), 84 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 586ad91300b0..26ff24924016 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3425,17 +3425,6 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, return pwq; } -/* undo alloc_unbound_pwq(), used only in the error path */ -static void free_unbound_pwq(struct pool_workqueue *pwq) -{ - lockdep_assert_held(&wq_pool_mutex); - - if (pwq) { - put_unbound_pool(pwq->pool); - kmem_cache_free(pwq_cache, pwq); - } -} - /** * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node * @attrs: the wq_attrs of interest * @node: the target NUMA node * @cpu_going_down: if >= 0, the CPU to consider as offline * @cpumask: outarg, the resulting cpumask @@ -3498,42 +3487,48 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, return old_pwq; } -/** - * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue - * @wq: the target workqueue - * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() - * - * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA - * machines, this function maps a separate pwq to each NUMA node with - * possibles CPUs in @attrs->cpumask so that work items are affine to the - * NUMA node it was issued on. Older pwqs are released as in-flight work - * items finish. Note that a work item which repeatedly requeues itself - * back-to-back will stay on its current pwq. - * - * Performs GFP_KERNEL allocations. - * - * Return: 0 on success and -errno on failure.
- */ -int apply_workqueue_attrs(struct workqueue_struct *wq, - const struct workqueue_attrs *attrs) +/* context to store the prepared attrs & pwqs before applying */ +struct apply_wqattrs_ctx { + struct workqueue_struct *wq; /* target workqueue */ + struct workqueue_attrs *attrs; /* attrs to apply */ + struct pool_workqueue *dfl_pwq; + struct pool_workqueue *pwq_tbl[]; +}; + +/* free the resources after success or abort */ +static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) +{ + if (ctx) { + int node; + + for_each_node(node) + put_pwq_unlocked(ctx->pwq_tbl[node]); + put_pwq_unlocked(ctx->dfl_pwq); + + free_workqueue_attrs(ctx->attrs); + + kfree(ctx); + } +} + +/* allocate the attrs and pwqs for later installation */ +static struct apply_wqattrs_ctx * +apply_wqattrs_prepare(struct workqueue_struct *wq, + const struct workqueue_attrs *attrs) { + struct apply_wqattrs_ctx *ctx; struct workqueue_attrs *new_attrs, *tmp_attrs; - struct pool_workqueue **pwq_tbl, *dfl_pwq; - int node, ret; + int node; - /* only unbound workqueues can change attributes */ - if (WARN_ON(!(wq->flags & WQ_UNBOUND))) - return -EINVAL; + lockdep_assert_held(&wq_pool_mutex); - /* creating multiple pwqs breaks ordering guarantee */ - if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) - return -EINVAL; + ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]), + GFP_KERNEL); - pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL); new_attrs = alloc_workqueue_attrs(GFP_KERNEL); tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL); - if (!pwq_tbl || !new_attrs || !tmp_attrs) - goto enomem; + if (!ctx || !new_attrs || !tmp_attrs) + goto out_free; /* make a copy of @attrs and sanitize it */ copy_workqueue_attrs(new_attrs, attrs); @@ -3546,76 +3541,112 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, */ copy_workqueue_attrs(tmp_attrs, new_attrs); - /* - * CPUs should stay stable across pwq creations and installations. - * Pin CPUs, determine the target cpumask for each node and create - * pwqs accordingly. - */ - get_online_cpus(); - - mutex_lock(&wq_pool_mutex); - /* * If something goes wrong during CPU up/down, we'll fall back to * the default pwq covering whole @attrs->cpumask. Always create * it even if we don't use it immediately. 
*/ - dfl_pwq = alloc_unbound_pwq(wq, new_attrs); - if (!dfl_pwq) - goto enomem_pwq; + ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); + if (!ctx->dfl_pwq) + goto out_free; for_each_node(node) { if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) { - pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); - if (!pwq_tbl[node]) - goto enomem_pwq; + ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); + if (!ctx->pwq_tbl[node]) + goto out_free; } else { - dfl_pwq->refcnt++; - pwq_tbl[node] = dfl_pwq; + ctx->dfl_pwq->refcnt++; + ctx->pwq_tbl[node] = ctx->dfl_pwq; } } - mutex_unlock(&wq_pool_mutex); + ctx->attrs = new_attrs; + ctx->wq = wq; + free_workqueue_attrs(tmp_attrs); + return ctx; + +out_free: + free_workqueue_attrs(tmp_attrs); + free_workqueue_attrs(new_attrs); + apply_wqattrs_cleanup(ctx); + return NULL; +} + +/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */ +static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) +{ + int node; /* all pwqs have been created successfully, let's install'em */ - mutex_lock(&wq->mutex); + mutex_lock(&ctx->wq->mutex); - copy_workqueue_attrs(wq->unbound_attrs, new_attrs); + copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); /* save the previous pwq and install the new one */ for_each_node(node) - pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]); + ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node, + ctx->pwq_tbl[node]); /* @dfl_pwq might not have been used, ensure it's linked */ - link_pwq(dfl_pwq); - swap(wq->dfl_pwq, dfl_pwq); + link_pwq(ctx->dfl_pwq); + swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); - mutex_unlock(&wq->mutex); + mutex_unlock(&ctx->wq->mutex); +} - /* put the old pwqs */ - for_each_node(node) - put_pwq_unlocked(pwq_tbl[node]); - put_pwq_unlocked(dfl_pwq); +/** + * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue + * @wq: the target workqueue + * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() + * + * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA + * machines, this function maps a separate pwq to each NUMA node with + * possibles CPUs in @attrs->cpumask so that work items are affine to the + * NUMA node it was issued on. Older pwqs are released as in-flight work + * items finish. Note that a work item which repeatedly requeues itself + * back-to-back will stay on its current pwq. + * + * Performs GFP_KERNEL allocations. + * + * Return: 0 on success and -errno on failure. + */ +int apply_workqueue_attrs(struct workqueue_struct *wq, + const struct workqueue_attrs *attrs) +{ + struct apply_wqattrs_ctx *ctx; + int ret = -ENOMEM; - put_online_cpus(); - ret = 0; - /* fall through */ -out_free: - free_workqueue_attrs(tmp_attrs); - free_workqueue_attrs(new_attrs); - kfree(pwq_tbl); - return ret; + /* only unbound workqueues can change attributes */ + if (WARN_ON(!(wq->flags & WQ_UNBOUND))) + return -EINVAL; -enomem_pwq: - free_unbound_pwq(dfl_pwq); - for_each_node(node) - if (pwq_tbl && pwq_tbl[node] != dfl_pwq) - free_unbound_pwq(pwq_tbl[node]); + /* creating multiple pwqs breaks ordering guarantee */ + if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) + return -EINVAL; + + /* + * CPUs should stay stable across pwq creations and installations. + * Pin CPUs, determine the target cpumask for each node and create + * pwqs accordingly. 
+ */ + get_online_cpus(); + + mutex_lock(&wq_pool_mutex); + ctx = apply_wqattrs_prepare(wq, attrs); mutex_unlock(&wq_pool_mutex); + + /* the ctx has been prepared successfully, let's commit it */ + if (ctx) { + apply_wqattrs_commit(ctx); + ret = 0; + } + put_online_cpus(); -enomem: - ret = -ENOMEM; - goto out_free; + + apply_wqattrs_cleanup(ctx); + + return ret; } /** -- cgit From b05a79280b346eb24ddb73b39988398015291075 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 27 Apr 2015 17:58:39 +0800 Subject: workqueue: Create low-level unbound workqueues cpumask Create a cpumask that limits the affinity of all unbound workqueues. This cpumask is controlled through a file at the root of the workqueue sysfs directory. It works on a lower-level than the per WQ_SYSFS workqueues cpumask files such that the effective cpumask applied for a given unbound workqueue is the intersection of /sys/devices/virtual/workqueue/$WORKQUEUE/cpumask and the new /sys/devices/virtual/workqueue/cpumask file. This patch implements the basic infrastructure and the read interface. wq_unbound_cpumask is initially set to cpu_possible_mask. Cc: Christoph Lameter Cc: Kevin Hilman Cc: Lai Jiangshan Cc: Mike Galbraith Cc: Paul E. McKenney Cc: Tejun Heo Cc: Viresh Kumar Signed-off-by: Frederic Weisbecker Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 26ff24924016..9be75e2a4da6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -299,6 +299,8 @@ static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ static LIST_HEAD(workqueues); /* PR: list of all workqueues */ static bool workqueue_freezing; /* PL: have wqs started freezing? */ +static cpumask_var_t wq_unbound_cpumask; + /* the per-cpu worker pools */ static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools); @@ -3532,7 +3534,7 @@ apply_wqattrs_prepare(struct workqueue_struct *wq, /* make a copy of @attrs and sanitize it */ copy_workqueue_attrs(new_attrs, attrs); - cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); + cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask); /* * We may create multiple pwqs with differing cpumasks. 
Make a @@ -4945,9 +4947,29 @@ static struct bus_type wq_subsys = { .dev_groups = wq_sysfs_groups, }; +static ssize_t wq_unbound_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int written; + + written = scnprintf(buf, PAGE_SIZE, "%*pb\n", + cpumask_pr_args(wq_unbound_cpumask)); + + return written; +} + +static struct device_attribute wq_sysfs_cpumask_attr = + __ATTR(cpumask, 0444, wq_unbound_cpumask_show, NULL); + static int __init wq_sysfs_init(void) { - return subsys_virtual_register(&wq_subsys, NULL); + int err; + + err = subsys_virtual_register(&wq_subsys, NULL); + if (err) + return err; + + return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr); } core_initcall(wq_sysfs_init); @@ -5095,6 +5117,9 @@ static int __init init_workqueues(void) WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); + BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); + cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); + pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); -- cgit From 042f7df15a4fff8eec42873f755aea848dcdedd1 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 30 Apr 2015 17:16:12 +0800 Subject: workqueue: Allow modifying low level unbound workqueue cpumask Allow to modify the low-level unbound workqueues cpumask through sysfs. This is performed by traversing the entire workqueue list and calling apply_wqattrs_prepare() on the unbound workqueues with the new low level mask. Only after all the preparation are done, we commit them all together. Ordered workqueues are ignored from the low level unbound workqueue cpumask, it will be handled in near future. All the (default & per-node) pwqs are mandatorily controlled by the low level cpumask. If the user configured cpumask doesn't overlap with the low level cpumask, the low level cpumask will be used for the wq instead. The comment of wq_calc_node_cpumask() is updated and explicitly requires that its first argument should be the attrs of the default pwq. The default wq_unbound_cpumask is cpu_possible_mask. The workqueue subsystem doesn't know its best default value, let the system manager or the other subsystem set it when needed. Changed from V8: merge the calculating code for the attrs of the default pwq together. minor change the code&comments for saving the user configured attrs. remove unnecessary list_del(). minor update the comment of wq_calc_node_cpumask(). update the comment of workqueue_set_unbound_cpumask(); Cc: Christoph Lameter Cc: Kevin Hilman Cc: Lai Jiangshan Cc: Mike Galbraith Cc: Paul E. 
McKenney Cc: Tejun Heo Cc: Viresh Kumar Cc: Frederic Weisbecker Original-patch-by: Frederic Weisbecker Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 1 + kernel/workqueue.c | 127 ++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 119 insertions(+), 9 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index deee212af8e0..4618dd672d1b 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -424,6 +424,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask); void free_workqueue_attrs(struct workqueue_attrs *attrs); int apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs); +int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); extern bool queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9be75e2a4da6..a3915abc1983 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -299,7 +299,7 @@ static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ static LIST_HEAD(workqueues); /* PR: list of all workqueues */ static bool workqueue_freezing; /* PL: have wqs started freezing? */ -static cpumask_var_t wq_unbound_cpumask; +static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */ /* the per-cpu worker pools */ static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], @@ -3429,7 +3429,7 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, /** * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node - * @attrs: the wq_attrs of interest + * @attrs: the wq_attrs of the default pwq of the target workqueue * @node: the target NUMA node * @cpu_going_down: if >= 0, the CPU to consider as offline * @cpumask: outarg, the resulting cpumask @@ -3493,6 +3493,7 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, struct apply_wqattrs_ctx { struct workqueue_struct *wq; /* target workqueue */ struct workqueue_attrs *attrs; /* attrs to apply */ + struct list_head list; /* queued for batching commit */ struct pool_workqueue *dfl_pwq; struct pool_workqueue *pwq_tbl[]; }; @@ -3532,9 +3533,15 @@ apply_wqattrs_prepare(struct workqueue_struct *wq, if (!ctx || !new_attrs || !tmp_attrs) goto out_free; - /* make a copy of @attrs and sanitize it */ + /* + * Calculate the attrs of the default pwq. + * If the user configured cpumask doesn't overlap with the + * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask. + */ copy_workqueue_attrs(new_attrs, attrs); cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask); + if (unlikely(cpumask_empty(new_attrs->cpumask))) + cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask); /* * We may create multiple pwqs with differing cpumasks. Make a @@ -3553,7 +3560,7 @@ apply_wqattrs_prepare(struct workqueue_struct *wq, goto out_free; for_each_node(node) { - if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) { + if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) { ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); if (!ctx->pwq_tbl[node]) goto out_free; @@ -3563,7 +3570,11 @@ apply_wqattrs_prepare(struct workqueue_struct *wq, } } + /* save the user configured attrs and sanitize it. 
*/ + copy_workqueue_attrs(new_attrs, attrs); + cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); ctx->attrs = new_attrs; + ctx->wq = wq; free_workqueue_attrs(tmp_attrs); return ctx; @@ -3704,11 +3715,11 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, /* * Let's determine what needs to be done. If the target cpumask is - * different from wq's, we need to compare it to @pwq's and create - * a new one if they don't match. If the target cpumask equals - * wq's, the default pwq should be used. + * different from the default pwq's, we need to compare it to @pwq's + * and create a new one if they don't match. If the target cpumask + * equals the default pwq's, the default pwq should be used. */ - if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) { + if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) goto out_unlock; } else { @@ -4731,6 +4742,84 @@ out_unlock: } #endif /* CONFIG_FREEZER */ +static int workqueue_apply_unbound_cpumask(void) +{ + LIST_HEAD(ctxs); + int ret = 0; + struct workqueue_struct *wq; + struct apply_wqattrs_ctx *ctx, *n; + + lockdep_assert_held(&wq_pool_mutex); + + list_for_each_entry(wq, &workqueues, list) { + if (!(wq->flags & WQ_UNBOUND)) + continue; + /* creating multiple pwqs breaks ordering guarantee */ + if (wq->flags & __WQ_ORDERED) + continue; + + ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs); + if (!ctx) { + ret = -ENOMEM; + break; + } + + list_add_tail(&ctx->list, &ctxs); + } + + list_for_each_entry_safe(ctx, n, &ctxs, list) { + if (!ret) + apply_wqattrs_commit(ctx); + apply_wqattrs_cleanup(ctx); + } + + return ret; +} + +/** + * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask + * @cpumask: the cpumask to set + * + * The low-level workqueues cpumask is a global cpumask that limits + * the affinity of all unbound workqueues. This function check the @cpumask + * and apply it to all unbound workqueues and updates all pwqs of them. + * + * Retun: 0 - Success + * -EINVAL - Invalid @cpumask + * -ENOMEM - Failed to allocate memory for attrs or pwqs. + */ +int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) +{ + int ret = -EINVAL; + cpumask_var_t saved_cpumask; + + if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) + return -ENOMEM; + + get_online_cpus(); + cpumask_and(cpumask, cpumask, cpu_possible_mask); + if (!cpumask_empty(cpumask)) { + mutex_lock(&wq_pool_mutex); + + /* save the old wq_unbound_cpumask. */ + cpumask_copy(saved_cpumask, wq_unbound_cpumask); + + /* update wq_unbound_cpumask at first and apply it to wqs. */ + cpumask_copy(wq_unbound_cpumask, cpumask); + ret = workqueue_apply_unbound_cpumask(); + + /* restore the wq_unbound_cpumask when failed. 
*/ + if (ret < 0) + cpumask_copy(wq_unbound_cpumask, saved_cpumask); + + mutex_unlock(&wq_pool_mutex); + } + put_online_cpus(); + + free_cpumask_var(saved_cpumask); + return ret; +} + #ifdef CONFIG_SYSFS /* * Workqueues with WQ_SYSFS flag set is visible to userland via @@ -4952,14 +5041,34 @@ static ssize_t wq_unbound_cpumask_show(struct device *dev, { int written; + mutex_lock(&wq_pool_mutex); written = scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(wq_unbound_cpumask)); + mutex_unlock(&wq_pool_mutex); return written; } +static ssize_t wq_unbound_cpumask_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + cpumask_var_t cpumask; + int ret; + + if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) + return -ENOMEM; + + ret = cpumask_parse(buf, cpumask); + if (!ret) + ret = workqueue_set_unbound_cpumask(cpumask); + + free_cpumask_var(cpumask); + return ret ? ret : count; +} + static struct device_attribute wq_sysfs_cpumask_attr = - __ATTR(cpumask, 0444, wq_unbound_cpumask_show, NULL); + __ATTR(cpumask, 0644, wq_unbound_cpumask_show, + wq_unbound_cpumask_store); static int __init wq_sysfs_init(void) { -- cgit From 6888c6f279a742a3190e186f44de70951b3491fd Mon Sep 17 00:00:00 2001 From: Chris Bainbridge Date: Tue, 5 May 2015 12:49:00 +0100 Subject: workqueue: fix trivial typo in Documentation/workqueue.txt Signed-off-by: Chris Bainbridge Signed-off-by: Tejun Heo --- Documentation/workqueue.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt index f81a65b54c29..5e0e05c5183e 100644 --- a/Documentation/workqueue.txt +++ b/Documentation/workqueue.txt @@ -365,7 +365,7 @@ root 5674 0.0 0.0 0 0 ? S 12:13 0:00 [kworker/1:0] If kworkers are going crazy (using too much cpu), there are two types of possible problems: - 1. Something beeing scheduled in rapid succession + 1. Something being scheduled in rapid succession 2. 
A single work item that consumes lots of cpu cycles The first one can be tracked using tracing: -- cgit From 30186c6fdc9a6a78c8b03351256451f07c66b6cd Mon Sep 17 00:00:00 2001 From: Gong Zhaogang Date: Mon, 11 May 2015 11:02:47 -0400 Subject: workqueue: function name in the comment differs from the real function name modify wq_calc_node_mask to wq_calc_node_cpumask Signed-off-by: Gong Zhaogang Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a3915abc1983..4545f6732b69 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3428,7 +3428,7 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, } /** - * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node + * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node * @attrs: the wq_attrs of the default pwq of the target workqueue * @node: the target NUMA node * @cpu_going_down: if >= 0, the CPU to consider as offline -- cgit From b749b1b67351bd9be1aa640cadf66d32dfcccfd1 Mon Sep 17 00:00:00 2001 From: Chen Hanxiao Date: Wed, 13 May 2015 06:10:05 -0400 Subject: workqueue: fix a typo s/detemined/determined Signed-off-by: Chen Hanxiao Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4545f6732b69..dd243ce33a89 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2618,7 +2618,7 @@ EXPORT_SYMBOL_GPL(flush_workqueue); * Wait until the workqueue becomes empty. While draining is in progress, * only chain queueing is allowed. IOW, only currently pending or running * work items on @wq can queue further work items on it. @wq is flushed - * repeatedly until it becomes empty. The number of flushing is detemined + * repeatedly until it becomes empty. The number of flushing is determined * by the depth of chaining and should be relatively short. Whine if it * takes too long. */ -- cgit From 5b95e1af8d17d85a17728f6de7dbff538e6e3c49 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 12 May 2015 20:32:29 +0800 Subject: workqueue: wq_pool_mutex protects the attrs-installation Currently wq_pool_mutex doesn't protect the attrs-installation, which means ->unbound_attrs, ->numa_pwq_tbl[] and ->dfl_pwq can only be accessed under wq->mutex, causing some inconveniences. For example, wq_update_unbound_numa() has to acquire wq->mutex before fetching the wq->unbound_attrs->no_numa and the old_pwq. attrs-installation is a short operation, so this change will not cause any latency for other operations which also acquire the wq_pool_mutex. The only unprotected attrs-installation code is in apply_workqueue_attrs(), so this patch touches less code than comments. It is also a preparation patch for the next several patches which read wq->unbound_attrs, wq->numa_pwq_tbl[] and wq->dfl_pwq with only wq_pool_mutex held. Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index dd243ce33a89..b1c222270e0e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -127,6 +127,11 @@ enum { * * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. * + * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. + * + * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or + * sched-RCU for reads.
+ * * WQ: wq->mutex protected. * * WR: wq->mutex protected for writes. Sched-RCU protected for reads. @@ -247,8 +252,8 @@ struct workqueue_struct { int nr_drainers; /* WQ: drain in progress */ int saved_max_active; /* WQ: saved pwq max_active */ - struct workqueue_attrs *unbound_attrs; /* WQ: only for unbound wqs */ - struct pool_workqueue *dfl_pwq; /* WQ: only for unbound wqs */ + struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ + struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */ #ifdef CONFIG_SYSFS struct wq_device *wq_dev; /* I: for sysfs interface */ @@ -268,7 +273,7 @@ struct workqueue_struct { /* hot fields used during command issue, aligned to cacheline */ unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */ - struct pool_workqueue __rcu *numa_pwq_tbl[]; /* FR: unbound pwqs indexed by node */ + struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */ }; static struct kmem_cache *pwq_cache; @@ -349,6 +354,12 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); lockdep_is_held(&wq->mutex), \ "sched RCU or wq->mutex should be held") +#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ + rcu_lockdep_assert(rcu_read_lock_sched_held() || \ + lockdep_is_held(&wq->mutex) || \ + lockdep_is_held(&wq_pool_mutex), \ + "sched RCU, wq->mutex or wq_pool_mutex should be held") + #define for_each_cpu_worker_pool(pool, cpu) \ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ @@ -553,7 +564,8 @@ static int worker_pool_assign_id(struct worker_pool *pool) * @wq: the target workqueue * @node: the node ID * - * This must be called either with pwq_lock held or sched RCU read locked. + * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU + * read locked. * If the pwq needs to be used beyond the locking in effect, the caller is * responsible for guaranteeing that the pwq stays online. * @@ -562,7 +574,7 @@ static int worker_pool_assign_id(struct worker_pool *pool) static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, int node) { - assert_rcu_or_wq_mutex(wq); + assert_rcu_or_wq_mutex_or_pool_mutex(wq); return rcu_dereference_raw(wq->numa_pwq_tbl[node]); } @@ -3479,6 +3491,7 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, { struct pool_workqueue *old_pwq; + lockdep_assert_held(&wq_pool_mutex); lockdep_assert_held(&wq->mutex); /* link_pwq() can handle duplicate calls */ @@ -3644,10 +3657,9 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, * pwqs accordingly. */ get_online_cpus(); - mutex_lock(&wq_pool_mutex); + ctx = apply_wqattrs_prepare(wq, attrs); - mutex_unlock(&wq_pool_mutex); /* the ctx has been prepared successfully, let's commit it */ if (ctx) { @@ -3655,6 +3667,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, ret = 0; } + mutex_unlock(&wq_pool_mutex); put_online_cpus(); apply_wqattrs_cleanup(ctx); -- cgit From f7142ed483f49f9108bea1be0c1afcd5d9098e05 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 12 May 2015 20:32:30 +0800 Subject: workqueue: simplify wq_update_unbound_numa() wq_update_unbound_numa() is known be called with wq_pool_mutex held. But wq_update_unbound_numa() requests wq->mutex before reading wq->unbound_attrs, wq->numa_pwq_tbl[] and wq->dfl_pwq. But these fields were changed to be allowed being read with wq_pool_mutex held. 
So we simply remove the mutex_lock(&wq->mutex). Without the dependence on the mutex_lock(&wq->mutex), the test of wq->unbound_attrs->no_numa can also be moved upward. The old code needed a long comment to describe the stability of @wq->unbound_attrs, which is now also guaranteed by wq_pool_mutex, so that comment is no longer needed. Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b1c222270e0e..4a9f65b54ee5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3708,7 +3708,8 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, lockdep_assert_held(&wq_pool_mutex); - if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND)) + if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) || + wq->unbound_attrs->no_numa) return; /* @@ -3719,10 +3720,6 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, target_attrs = wq_update_unbound_numa_attrs_buf; cpumask = target_attrs->cpumask; - mutex_lock(&wq->mutex); - if (wq->unbound_attrs->no_numa) - goto out_unlock; - copy_workqueue_attrs(target_attrs, wq->unbound_attrs); pwq = unbound_pwq_by_node(wq, node); @@ -3734,33 +3731,26 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, */ if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) - goto out_unlock; + return; } else { goto use_dfl_pwq; } - mutex_unlock(&wq->mutex); - /* create a new pwq */ pwq = alloc_unbound_pwq(wq, target_attrs); if (!pwq) { pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n", wq->name); - mutex_lock(&wq->mutex); goto use_dfl_pwq; } - /* - * Install the new pwq. As this function is called only from CPU - * hotplug callbacks and applying a new attrs is wrapped with - * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed - * inbetween. - */ + /* Install the new pwq. */ mutex_lock(&wq->mutex); old_pwq = numa_pwq_tbl_install(wq, node, pwq); goto out_unlock; use_dfl_pwq: + mutex_lock(&wq->mutex); spin_lock_irq(&wq->dfl_pwq->pool->lock); get_pwq(wq->dfl_pwq); spin_unlock_irq(&wq->dfl_pwq->pool->lock); -- cgit From a0111cf6710bd1b4145ef313d3f4772602af051b Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 19 May 2015 18:03:47 +0800 Subject: workqueue: separate out and refactor the locking of applying attrs Applying attrs requires two locks: get_online_cpus() and wq_pool_mutex, and this code is duplicated at two places (apply_workqueue_attrs() and workqueue_set_unbound_cpumask()). So we separate out this locking code into apply_wqattrs_[un]lock() and do a minor refactor on apply_workqueue_attrs(). apply_wqattrs_[un]lock() will also be used in a later patch to ensure attrs changes are properly synchronized.
tj: minor updates to comments Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 78 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 45 insertions(+), 33 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4a9f65b54ee5..72c1adbf7632 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3621,24 +3621,21 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) mutex_unlock(&ctx->wq->mutex); } -/** - * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue - * @wq: the target workqueue - * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() - * - * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA - * machines, this function maps a separate pwq to each NUMA node with - * possibles CPUs in @attrs->cpumask so that work items are affine to the - * NUMA node it was issued on. Older pwqs are released as in-flight work - * items finish. Note that a work item which repeatedly requeues itself - * back-to-back will stay on its current pwq. - * - * Performs GFP_KERNEL allocations. - * - * Return: 0 on success and -errno on failure. - */ -int apply_workqueue_attrs(struct workqueue_struct *wq, - const struct workqueue_attrs *attrs) +static void apply_wqattrs_lock(void) +{ + /* CPUs should stay stable across pwq creations and installations */ + get_online_cpus(); + mutex_lock(&wq_pool_mutex); +} + +static void apply_wqattrs_unlock(void) +{ + mutex_unlock(&wq_pool_mutex); + put_online_cpus(); +} + +static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, + const struct workqueue_attrs *attrs) { struct apply_wqattrs_ctx *ctx; int ret = -ENOMEM; @@ -3651,14 +3648,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) return -EINVAL; - /* - * CPUs should stay stable across pwq creations and installations. - * Pin CPUs, determine the target cpumask for each node and create - * pwqs accordingly. - */ - get_online_cpus(); - mutex_lock(&wq_pool_mutex); - ctx = apply_wqattrs_prepare(wq, attrs); /* the ctx has been prepared successfully, let's commit it */ @@ -3667,14 +3656,39 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, ret = 0; } - mutex_unlock(&wq_pool_mutex); - put_online_cpus(); - apply_wqattrs_cleanup(ctx); return ret; } +/** + * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue + * @wq: the target workqueue + * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() + * + * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA + * machines, this function maps a separate pwq to each NUMA node with + * possibles CPUs in @attrs->cpumask so that work items are affine to the + * NUMA node it was issued on. Older pwqs are released as in-flight work + * items finish. Note that a work item which repeatedly requeues itself + * back-to-back will stay on its current pwq. + * + * Performs GFP_KERNEL allocations. + * + * Return: 0 on success and -errno on failure. 
+ */ +int apply_workqueue_attrs(struct workqueue_struct *wq, + const struct workqueue_attrs *attrs) +{ + int ret; + + apply_wqattrs_lock(); + ret = apply_workqueue_attrs_locked(wq, attrs); + apply_wqattrs_unlock(); + + return ret; +} + /** * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug * @wq: the target workqueue @@ -4799,10 +4813,9 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) return -ENOMEM; - get_online_cpus(); cpumask_and(cpumask, cpumask, cpu_possible_mask); if (!cpumask_empty(cpumask)) { - mutex_lock(&wq_pool_mutex); + apply_wqattrs_lock(); /* save the old wq_unbound_cpumask. */ cpumask_copy(saved_cpumask, wq_unbound_cpumask); @@ -4815,9 +4828,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) if (ret < 0) cpumask_copy(wq_unbound_cpumask, saved_cpumask); - mutex_unlock(&wq_pool_mutex); + apply_wqattrs_unlock(); } - put_online_cpus(); free_cpumask_var(saved_cpumask); return ret; -- cgit From d4d3e2579756e3a5f4fbf8eac211f0696e253bcd Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 19 May 2015 18:03:48 +0800 Subject: workqueue: ensure attrs changes are properly synchronized Current modification to attrs via sysfs is not fully synchronized. Process A (change cpumask) | Process B (change numa affinity) wq_cpumask_store() | wq_sysfs_prep_attrs() | | apply_workqueue_attrs() apply_workqueue_attrs() | It results that the Process B's operation is totally reverted without any notification, it is a buggy behavior. So this patch moves wq_sysfs_prep_attrs() into the protection under wq_pool_mutex to ensure attrs changes are properly synchronized. Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 72c1adbf7632..80190755a3c4 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4954,18 +4954,22 @@ static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, { struct workqueue_struct *wq = dev_to_wq(dev); struct workqueue_attrs *attrs; - int ret; + int ret = -ENOMEM; + + apply_wqattrs_lock(); attrs = wq_sysfs_prep_attrs(wq); if (!attrs) - return -ENOMEM; + goto out_unlock; if (sscanf(buf, "%d", &attrs->nice) == 1 && attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) - ret = apply_workqueue_attrs(wq, attrs); + ret = apply_workqueue_attrs_locked(wq, attrs); else ret = -EINVAL; +out_unlock: + apply_wqattrs_unlock(); free_workqueue_attrs(attrs); return ret ?: count; } @@ -4989,16 +4993,20 @@ static ssize_t wq_cpumask_store(struct device *dev, { struct workqueue_struct *wq = dev_to_wq(dev); struct workqueue_attrs *attrs; - int ret; + int ret = -ENOMEM; + + apply_wqattrs_lock(); attrs = wq_sysfs_prep_attrs(wq); if (!attrs) - return -ENOMEM; + goto out_unlock; ret = cpumask_parse(buf, attrs->cpumask); if (!ret) - ret = apply_workqueue_attrs(wq, attrs); + ret = apply_workqueue_attrs_locked(wq, attrs); +out_unlock: + apply_wqattrs_unlock(); free_workqueue_attrs(attrs); return ret ?: count; } @@ -5022,18 +5030,22 @@ static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr, { struct workqueue_struct *wq = dev_to_wq(dev); struct workqueue_attrs *attrs; - int v, ret; + int v, ret = -ENOMEM; + + apply_wqattrs_lock(); attrs = wq_sysfs_prep_attrs(wq); if (!attrs) - return -ENOMEM; + goto out_unlock; ret = -EINVAL; if (sscanf(buf, "%d", &v) == 1) { attrs->no_numa = !v; - ret = 
apply_workqueue_attrs(wq, attrs); + ret = apply_workqueue_attrs_locked(wq, attrs); } +out_unlock: + apply_wqattrs_unlock(); free_workqueue_attrs(attrs); return ret ?: count; } -- cgit From da7f91b2e2c6176f95ca7b538d74dc70c5d11ded Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 20 May 2015 14:41:17 +0800 Subject: workqueue: remove the declaration of copy_workqueue_attrs() This pre-declaration was unneeded since a previous refactor patch 6ba94429c8e7 ("workqueue: Reorder sysfs code"). Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 80190755a3c4..bd4b24d3489d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -337,8 +337,6 @@ struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); static int worker_thread(void *__worker); -static void copy_workqueue_attrs(struct workqueue_attrs *to, - const struct workqueue_attrs *from); static void workqueue_sysfs_unregister(struct workqueue_struct *wq); #define CREATE_TRACE_POINTS -- cgit From 899a94fe15a8e928277ff0d0402c086fa67fe16f Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 20 May 2015 14:41:18 +0800 Subject: workqueue: remove the lock from wq_sysfs_prep_attrs() Reading to wq->unbound_attrs requires protection of either wq_pool_mutex or wq->mutex, and wq_sysfs_prep_attrs() is called with wq_pool_mutex held, so we don't need to grab wq->mutex here. Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index bd4b24d3489d..ad8dc2b9efc3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4937,13 +4937,13 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) { struct workqueue_attrs *attrs; + lockdep_assert_held(&wq_pool_mutex); + attrs = alloc_workqueue_attrs(GFP_KERNEL); if (!attrs) return NULL; - mutex_lock(&wq->mutex); copy_workqueue_attrs(attrs, wq->unbound_attrs); - mutex_unlock(&wq->mutex); return attrs; } -- cgit From 37b1ef31a568fc02e53587620226e5f3c66454c8 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 20 May 2015 14:41:19 +0800 Subject: workqueue: move flush_scheduled_work() to workqueue.h flush_scheduled_work() is just a simple call to flush_work(). Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 30 +++++++++++++++++++++++++++++- kernel/workqueue.c | 30 ------------------------------ 2 files changed, 29 insertions(+), 31 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4618dd672d1b..738b30b39b68 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -435,7 +435,6 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, extern void flush_workqueue(struct workqueue_struct *wq); extern void drain_workqueue(struct workqueue_struct *wq); -extern void flush_scheduled_work(void); extern int schedule_on_each_cpu(work_func_t func); @@ -531,6 +530,35 @@ static inline bool schedule_work(struct work_struct *work) return queue_work(system_wq, work); } +/** + * flush_scheduled_work - ensure that any scheduled work has run to completion. + * + * Forces execution of the kernel-global workqueue and blocks until its + * completion. + * + * Think twice before calling this function! It's very easy to get into + * trouble if you don't take great care. 
Either of the following situations + * will lead to deadlock: + * + * One of the work items currently on the workqueue needs to acquire + * a lock held by your code or its caller. + * + * Your code is running in the context of a work routine. + * + * They will be detected by lockdep when they occur, but the first might not + * occur very often. It depends on what work items are on the workqueue and + * what locks they need, which you have no control over. + * + * In most situations flushing the entire workqueue is overkill; you merely + * need to know that a particular work item isn't queued and isn't running. + * In such cases you should use cancel_delayed_work_sync() or + * cancel_work_sync() instead. + */ +static inline void flush_scheduled_work(void) +{ + flush_workqueue(system_wq); +} + /** * schedule_delayed_work_on - queue work in global workqueue on CPU after delay * @cpu: cpu to use diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ad8dc2b9efc3..c9eaa4e5c867 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2958,36 +2958,6 @@ int schedule_on_each_cpu(work_func_t func) return 0; } -/** - * flush_scheduled_work - ensure that any scheduled work has run to completion. - * - * Forces execution of the kernel-global workqueue and blocks until its - * completion. - * - * Think twice before calling this function! It's very easy to get into - * trouble if you don't take great care. Either of the following situations - * will lead to deadlock: - * - * One of the work items currently on the workqueue needs to acquire - * a lock held by your code or its caller. - * - * Your code is running in the context of a work routine. - * - * They will be detected by lockdep when they occur, but the first might not - * occur very often. It depends on what work items are on the workqueue and - * what locks they need, which you have no control over. - * - * In most situations flushing the entire workqueue is overkill; you merely - * need to know that a particular work item isn't queued and isn't running. - * In such cases you should use cancel_delayed_work_sync() or - * cancel_work_sync() instead. - */ -void flush_scheduled_work(void) -{ - flush_workqueue(system_wq); -} -EXPORT_SYMBOL(flush_scheduled_work); - /** * execute_in_process_context - reliably execute the routine with user context * @fn: the function to execute -- cgit From 402dd89d6cdbeeaab42b810542b487017725c628 Mon Sep 17 00:00:00 2001 From: Shailendra Verma Date: Sat, 23 May 2015 10:38:14 +0530 Subject: workqueue: fix typos in comments tj: dropped iff -> if, iff is if and only if not a typo. Spotted by Randy Dunlap. Signed-off-by: Shailendra Verma Signed-off-by: Tejun Heo Cc: Randy Dunlap --- kernel/workqueue.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c9eaa4e5c867..5243d4b03087 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -988,7 +988,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool, * move_linked_works - move linked works to a list * @work: start of series of works to be scheduled * @head: target list to append @work to - * @nextp: out paramter for nested worklist walking + * @nextp: out parameter for nested worklist walking * * Schedule linked works starting from @work to @head. 
Work series to * be scheduled starts at @work and includes any consecutive work with @@ -3063,7 +3063,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a, * init_worker_pool - initialize a newly zalloc'd worker_pool * @pool: worker_pool to initialize * - * Initiailize a newly zalloc'd @pool. It also allocates @pool->attrs. + * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. * * Return: 0 on success, -errno on failure. Even on failure, all fields * inside @pool proper are initialized and put_unbound_pool() can be called @@ -4414,7 +4414,7 @@ static void rebind_workers(struct worker_pool *pool) /* * Restore CPU affinity of all workers. As all idle workers should * be on the run-queue of the associated CPU before any local - * wake-ups for concurrency management happen, restore CPU affinty + * wake-ups for concurrency management happen, restore CPU affinity * of all workers first and then clear UNBOUND. As we're called * from CPU_ONLINE, the following shouldn't fail. */ @@ -5105,7 +5105,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) int ret; /* - * Adjusting max_active or creating new pwqs by applyting + * Adjusting max_active or creating new pwqs by applying * attributes breaks ordering guarantee. Disallow exposing ordered * workqueues. */ -- cgit