-rw-r--r--  include/linux/workqueue.h |  4 ++--
-rw-r--r--  kernel/workqueue.c        | 12 ++++--------
2 files changed, 6 insertions, 10 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index e48554e6526c..8b505d22fc0e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -665,7 +665,7 @@ int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif
-int __init workqueue_init_early(void);
-int __init workqueue_init(void);
+void __init workqueue_init_early(void);
+void __init workqueue_init(void);
#endif
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4e01c448b4b4..3816a18c251e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2834,7 +2834,7 @@ void flush_workqueue(struct workqueue_struct *wq)
	 * First flushers are responsible for cascading flushes and
	 * handling overflow. Non-first flushers can simply return.
	 */
-	if (wq->first_flusher != &this_flusher)
+	if (READ_ONCE(wq->first_flusher) != &this_flusher)
		return;
	mutex_lock(&wq->mutex);
@@ -2843,7 +2843,7 @@ void flush_workqueue(struct workqueue_struct *wq)
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;
-	wq->first_flusher = NULL;
+	WRITE_ONCE(wq->first_flusher, NULL);
	WARN_ON_ONCE(!list_empty(&this_flusher.list));
	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
@@ -5898,7 +5898,7 @@ static void __init wq_numa_init(void)
* items. Actual work item execution starts only after kthreads can be
* created and scheduled right before early initcalls.
*/
-int __init workqueue_init_early(void)
+void __init workqueue_init_early(void)
{
	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
	int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
@@ -5965,8 +5965,6 @@ int __init workqueue_init_early(void)
	       !system_unbound_wq || !system_freezable_wq ||
	       !system_power_efficient_wq ||
	       !system_freezable_power_efficient_wq);
-
-	return 0;
}
/**
@@ -5978,7 +5976,7 @@ int __init workqueue_init_early(void)
* are no kworkers executing the work items yet. Populate the worker pools
* with the initial workers and enable future kworker creations.
*/
-int __init workqueue_init(void)
+void __init workqueue_init(void)
{
	struct workqueue_struct *wq;
	struct worker_pool *pool;
@@ -6025,6 +6023,4 @@ int __init workqueue_init(void)
	wq_online = true;
	wq_watchdog_init();
-
-	return 0;
}
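
The READ_ONCE()/WRITE_ONCE() hunks above annotate a lockless fast-path check on wq->first_flusher that is then rechecked under wq->mutex before the field is cleared. Below is a minimal userspace sketch of that pattern, assuming simplified volatile-based stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() macros; the struct, field, and function names are illustrative, not the real kernel/workqueue.c code.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros: a single volatile access. */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

struct flusher {
	int flush_color;
};

struct queue {
	pthread_mutex_t mutex;
	struct flusher *first_flusher;	/* written under mutex, read locklessly */
};

static void finish_flush(struct queue *q, struct flusher *me)
{
	/*
	 * Lockless fast path: another thread may clear or reassign
	 * first_flusher concurrently, so mark the load explicitly
	 * instead of doing a plain racy read.
	 */
	if (READ_ONCE(q->first_flusher) != me)
		return;

	pthread_mutex_lock(&q->mutex);

	/* Recheck under the lock; we may have raced with another flusher. */
	if (q->first_flusher == me)
		WRITE_ONCE(q->first_flusher, NULL);

	pthread_mutex_unlock(&q->mutex);
}

int main(void)
{
	struct flusher me = { .flush_color = 0 };
	struct queue q = { .first_flusher = &me };

	pthread_mutex_init(&q.mutex, NULL);
	finish_flush(&q, &me);
	printf("first_flusher cleared: %s\n", q.first_flusher ? "no" : "yes");
	pthread_mutex_destroy(&q.mutex);
	return 0;
}

The annotations only mark the intentionally lockless access; the locking logic itself is unchanged by the hunks above.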
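The comments in the workqueue_init_early()/workqueue_init() hunks describe a two-phase bring-up: after the early step work items can be queued but nothing executes until the second step brings workers online. The sketch below models that split in plain userspace C under the same assumption the patch relies on, namely that neither step has a failure path, which is why both can return void. All names and the list-based queue are illustrative stand-ins, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

struct work_item {
	void (*func)(void);
	struct work_item *next;
};

static struct work_item *pending;	/* queued, not yet executed */
static bool wq_online;			/* set once "workers" exist */

static void run_pending(void)
{
	while (pending) {
		struct work_item *w = pending;

		pending = w->next;
		w->func();
	}
}

/* Phase 1: queueing becomes possible, execution is still deferred. */
static void workqueue_init_early(void)
{
	pending = NULL;
}

static void queue_work(struct work_item *work)
{
	work->next = pending;
	pending = work;
	if (wq_online)
		run_pending();	/* workers available: run immediately */
}

/* Phase 2: "workers" come online and drain what was queued early. */
static void workqueue_init(void)
{
	wq_online = true;
	run_pending();
}

static void hello(void)
{
	printf("work item executed after workqueue_init()\n");
}

int main(void)
{
	struct work_item w = { .func = hello };

	workqueue_init_early();
	queue_work(&w);		/* queued, but nothing runs yet */
	workqueue_init();	/* the deferred work actually runs here */
	return 0;
}

Because both phases always succeed, callers in the boot code never had a return value worth checking, which is what the int to void conversion above reflects.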