From 0e0cafcda83951bb0b438d78a4216ae94483cba6 Mon Sep 17 00:00:00 2001
From: Alexei Potashnik
Date: Tue, 18 Jul 2017 11:12:53 -0700
Subject: workqueue: doc change for ST behavior on NUMA systems

NUMA rework of workqueue made the combination of max_active of 1 and
WQ_UNBOUND insufficient to guarantee ST behavior system wide.
alloc_ordered_queue should now be used instead.

Signed-off-by: Alexei Potashnik
Signed-off-by: Tejun Heo
---
 Documentation/core-api/workqueue.rst | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst
index ffdec94fbca1..3943b5bfa8cf 100644
--- a/Documentation/core-api/workqueue.rst
+++ b/Documentation/core-api/workqueue.rst
@@ -243,11 +243,15 @@ throttling the number of active work items, specifying '0' is
 recommended.
 
 Some users depend on the strict execution ordering of ST wq.  The
-combination of ``@max_active`` of 1 and ``WQ_UNBOUND`` is used to
-achieve this behavior.  Work items on such wq are always queued to the
-unbound worker-pools and only one work item can be active at any given
+combination of ``@max_active`` of 1 and ``WQ_UNBOUND`` used to
+achieve this behavior.  Work items on such wq were always queued to the
+unbound worker-pools and only one work item could be active at any given
 time thus achieving the same ordering property as ST wq.
 
+In the current implementation the above configuration only guarantees
+ST behavior within a given NUMA node. Instead alloc_ordered_queue should
+be used to achieve system wide ST behavior.
+
 Example Execution Scenarios
 ===========================
 
--
cgit

From 9a2614916ac564d6ea1d0a5cb986298bc508c3bf Mon Sep 17 00:00:00 2001
From: Benjamin Peterson
Date: Sun, 6 Aug 2017 19:33:22 -0700
Subject: workqueue: fix path to documentation

Signed-off-by: Benjamin Peterson
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a86688fabc55..4fa6c7650f09 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -21,7 +21,7 @@
  * pools for workqueues which are not bound to any specific CPU - the
  * number of these backing pools is dynamic.
  *
- * Please read Documentation/workqueue.txt for details.
+ * Please read Documentation/core-api/workqueue.rst for details.
  */
 
 #include <linux/export.h>
--
cgit

From c5a94a618e7ac86b20f53d947f68d7cee6a4c6bc Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 23 Aug 2017 13:58:44 +0200
Subject: workqueue: Use TASK_IDLE

Workqueues don't use signals; they (ab)use TASK_INTERRUPTIBLE to avoid
increasing the loadavg numbers.  We've 'recently' introduced TASK_IDLE
for this case:

  80ed87c8a9ca ("sched/wait: Introduce TASK_NOLOAD and TASK_IDLE")

Use it.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4fa6c7650f09..2d278b9a5469 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2247,7 +2247,7 @@ sleep:
	 * event.
	 */
	worker_enter_idle(worker);
-	__set_current_state(TASK_INTERRUPTIBLE);
+	__set_current_state(TASK_IDLE);
	spin_unlock_irq(&pool->lock);
	schedule();
	goto woke_up;
@@ -2289,7 +2289,7 @@ static int rescuer_thread(void *__rescuer)
	 */
	rescuer->task->flags |= PF_WQ_WORKER;
 repeat:
-	set_current_state(TASK_INTERRUPTIBLE);
+	set_current_state(TASK_IDLE);
 
	/*
	 * By the time the rescuer is requested to stop, the workqueue
--
cgit

From fbf1c41fc0f4d3574ac2377245efd666c1fa3075 Mon Sep 17 00:00:00 2001
From: Ben Hutchings
Date: Sun, 3 Sep 2017 01:18:41 +0100
Subject: workqueue: Fix flag collision

Commit 0a94efb5acbb ("workqueue: implicit ordered attribute should be
overridable") introduced a __WQ_ORDERED_EXPLICIT flag but gave it the
same value as __WQ_LEGACY.  I don't believe these were intended to
mean the same thing, so renumber __WQ_ORDERED_EXPLICIT.

Fixes: 0a94efb5acbb ("workqueue: implicit ordered attribute should be overridable")
Signed-off-by: Ben Hutchings
Cc: stable@vger.kernel.org # v4.13
Signed-off-by: Tejun Heo
---
 include/linux/workqueue.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index db6dc9dc0482..1c49431f3121 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -323,8 +323,8 @@ enum {
 
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
-	__WQ_ORDERED_EXPLICIT	= 1 << 18, /* internal: alloc_ordered_workqueue() */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
+	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */
 
	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
--
cgit
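
As a hedged illustration of the first patch's recommendation, the sketch below
shows how a caller could obtain system-wide single-threaded ordering with
alloc_ordered_workqueue() (the macro behind the alloc_ordered_queue spelling in
the patch description) instead of relying on WQ_UNBOUND with max_active of 1.
The example_* identifiers and the queue name are hypothetical and not taken
from the patches above.

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;	/* hypothetical */
	static struct work_struct example_work;		/* hypothetical */

	static void example_work_fn(struct work_struct *work)
	{
		/* items on an ordered wq run one at a time, in queueing order */
	}

	static int __init example_init(void)
	{
		/*
		 * An ordered workqueue allows at most one active work item
		 * system wide; after the NUMA rework, WQ_UNBOUND with
		 * max_active of 1 only guarantees that within a NUMA node.
		 */
		example_wq = alloc_ordered_workqueue("example_ordered", 0);
		if (!example_wq)
			return -ENOMEM;

		INIT_WORK(&example_work, example_work_fn);
		queue_work(example_wq, &example_work);
		return 0;
	}

On teardown such a queue would be drained and released with
destroy_workqueue(example_wq).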