Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 34
1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 05bf5427124a..9f4341885f60 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4422,7 +4422,8 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
/**
* wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
* @wq: the target workqueue
- * @cpu: the CPU coming up or going down
- * @online: whether @cpu is coming up or going down
+ * @cpu: the CPU to update pool association for
+ * @hotplug_cpu: the CPU coming up or going down
+ * @online: whether @hotplug_cpu is coming up or going down
*
* This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
@@ -4442,10 +4443,10 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
* CPU_DOWN_PREPARE.
*/
static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
- bool online)
+ int hotplug_cpu, bool online)
{
int node = cpu_to_node(cpu);
- int cpu_off = online ? -1 : cpu;
+ int off_cpu = online ? -1 : hotplug_cpu;
struct pool_workqueue *old_pwq = NULL, *pwq;
struct workqueue_attrs *target_attrs;
cpumask_t *cpumask;
@@ -4473,7 +4474,7 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
* and create a new one if they don't match. If the target cpumask
* equals the default pwq's, the default pwq should be used.
*/
- if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
+ if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, off_cpu, cpumask)) {
if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
return;
} else {
@@ -5514,8 +5515,15 @@ int workqueue_online_cpu(unsigned int cpu)
}
/* update NUMA affinity of unbound workqueues */
- list_for_each_entry(wq, &workqueues, list)
- wq_update_unbound_numa(wq, cpu, true);
+ list_for_each_entry(wq, &workqueues, list) {
+ int tcpu;
+
+ for_each_possible_cpu(tcpu) {
+ if (cpu_to_node(tcpu) == cpu_to_node(cpu)) {
+ wq_update_unbound_numa(wq, tcpu, cpu, true);
+ }
+ }
+ }
mutex_unlock(&wq_pool_mutex);
return 0;
@@ -5533,8 +5541,15 @@ int workqueue_offline_cpu(unsigned int cpu)
/* update NUMA affinity of unbound workqueues */
mutex_lock(&wq_pool_mutex);
- list_for_each_entry(wq, &workqueues, list)
- wq_update_unbound_numa(wq, cpu, false);
+ list_for_each_entry(wq, &workqueues, list) {
+ int tcpu;
+
+ for_each_possible_cpu(tcpu) {
+ if (cpu_to_node(tcpu) == cpu_to_node(cpu)) {
+ wq_update_unbound_numa(wq, tcpu, cpu, false);
+ }
+ }
+ }
mutex_unlock(&wq_pool_mutex);
return 0;
@@ -6509,7 +6524,8 @@ void __init workqueue_init(void)
}
list_for_each_entry(wq, &workqueues, list) {
- wq_update_unbound_numa(wq, smp_processor_id(), true);
+ wq_update_unbound_numa(wq, smp_processor_id(), smp_processor_id(),
+ true);
WARN(init_rescuer(wq),
"workqueue: failed to create early rescuer for %s",
wq->name);
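Note (not part of the patch): below is a minimal standalone C sketch of the update pattern the patch introduces in workqueue_online_cpu()/workqueue_offline_cpu(). It mocks cpu_to_node() with an assumed two-node topology, uses a toy NR_CPUS of 8 and a stand-in update function, and omits the real kernel helpers and locking.

/*
 * Standalone illustration only -- not kernel code. Mirrors the patched
 * hotplug loops: for a CPU going up or down, refresh the unbound-pool
 * association of every possible CPU that shares its NUMA node, passing
 * the hotplugged CPU separately so it can be excluded on offline.
 */
#include <stdio.h>

#define NR_CPUS 8			/* assumed toy CPU count */

static int cpu_to_node(int cpu)
{
	return cpu / 4;			/* assumed: CPUs 0-3 on node 0, 4-7 on node 1 */
}

/* stand-in for the kernel's wq_update_unbound_numa(wq, cpu, hotplug_cpu, online) */
static void update_unbound_numa(const char *wq, int cpu, int hotplug_cpu, int online)
{
	int off_cpu = online ? -1 : hotplug_cpu;

	printf("%s: node %d: update pwq for cpu %d, off_cpu=%d\n",
	       wq, cpu_to_node(cpu), cpu, off_cpu);
}

int main(void)
{
	int hotplug_cpu = 5;		/* the CPU going offline in this example */
	int tcpu;

	/* same shape as the loop added to workqueue_offline_cpu() */
	for (tcpu = 0; tcpu < NR_CPUS; tcpu++) {
		if (cpu_to_node(tcpu) == cpu_to_node(hotplug_cpu))
			update_unbound_numa("events_unbound", tcpu, hotplug_cpu, 0);
	}
	return 0;
}

With this toy topology, taking CPU 5 offline touches CPUs 4-7, which is the behavior change the patch makes: before it, only the hotplugged CPU itself was passed to wq_update_unbound_numa().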