Diffstat (limited to 'kernel/workqueue.c')

 kernel/workqueue.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3bef0754cf73..9f9148075828 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2481,7 +2481,7 @@ EXPORT_SYMBOL_GPL(queue_work_node);
 
 void delayed_work_timer_fn(struct timer_list *t)
 {
-	struct delayed_work *dwork = from_timer(dwork, t, timer);
+	struct delayed_work *dwork = timer_container_of(dwork, t, timer);
 
 	/* should have been called from irqsafe timer with irq already off */
 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
@@ -2909,7 +2909,7 @@ static void set_worker_dying(struct worker *worker, struct list_head *list)
  */
 static void idle_worker_timeout(struct timer_list *t)
 {
-	struct worker_pool *pool = from_timer(pool, t, idle_timer);
+	struct worker_pool *pool = timer_container_of(pool, t, idle_timer);
 	bool do_cull = false;
 
 	if (work_pending(&pool->idle_cull_work))
@@ -3008,7 +3008,7 @@ static void send_mayday(struct work_struct *work)
 
 static void pool_mayday_timeout(struct timer_list *t)
 {
-	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
+	struct worker_pool *pool = timer_container_of(pool, t, mayday_timer);
 	struct work_struct *work;
 
 	raw_spin_lock_irq(&pool->lock);
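
The three hunks above are the same mechanical rename: from_timer() became timer_container_of(), and both expand to a container_of() lookup that recovers the structure embedding the timer_list from the bare timer pointer the callback receives. Below is a minimal userspace sketch of that pattern, not kernel code; the sketch_* names are made up for illustration.

#include <stddef.h>
#include <stdio.h>

struct sketch_timer {
	void (*fn)(struct sketch_timer *t);
};

struct sketch_dwork {
	int cpu;
	struct sketch_timer timer;	/* embedded, like delayed_work.timer */
};

/* container_of-style lookup: subtract the field offset from the field pointer */
#define sketch_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void sketch_timer_fn(struct sketch_timer *t)
{
	/* roughly what timer_container_of(dwork, t, timer) expands to */
	struct sketch_dwork *dwork =
		sketch_container_of(t, struct sketch_dwork, timer);

	printf("recovered dwork with cpu=%d\n", dwork->cpu);
}

int main(void)
{
	struct sketch_dwork dw = { .cpu = 3 };

	dw.timer.fn = sketch_timer_fn;
	dw.timer.fn(&dw.timer);	/* "timer fires": only &dw.timer is passed */
	return 0;
}

The point of the macro is that the timer core only hands the callback &dwork->timer; subtracting the field offset is what gets dwork itself back.
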
@@ -7767,7 +7767,8 @@ void __init workqueue_init_early(void)
 	restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
 
 	cpumask_copy(wq_requested_unbound_cpumask, wq_unbound_cpumask);
-
+	cpumask_andnot(wq_isolated_cpumask, cpu_possible_mask,
+		       housekeeping_cpumask(HK_TYPE_DOMAIN));
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
 	unbound_wq_update_pwq_attrs_buf = alloc_workqueue_attrs();
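
The last hunk fills wq_isolated_cpumask at early-init time: a CPU is treated as isolated when it is possible but not in the HK_TYPE_DOMAIN housekeeping set. Here is a userspace sketch of the andnot arithmetic, with plain unsigned long values standing in for struct cpumask and made-up mask contents:

#include <stdio.h>

int main(void)
{
	unsigned long possible     = 0xff;	/* CPUs 0-7 exist */
	unsigned long housekeeping = 0x0f;	/* CPUs 0-3 do housekeeping */
	unsigned long isolated;

	/* cpumask_andnot(dst, src1, src2) computes dst = src1 & ~src2 */
	isolated = possible & ~housekeeping;

	printf("isolated mask: 0x%lx\n", isolated);	/* 0xf0: CPUs 4-7 */
	return 0;
}
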