diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2025-09-09 13:16:23 +0200 |
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2025-10-16 11:13:53 +0200 |
| commit | 650952d3fb3889b04cbda722351b5d6090a1c10b (patch) | |
| tree | e59a440e2f6ee8889653016b52826be5acc8d168 | |
| parent | b079d93796528053cde322f2ca838c2d21c297e7 (diff) | |
sched: Make __do_set_cpus_allowed() use the sched_change pattern
Now that do_set_cpus_allowed() holds all the regular locks, convert it
to use the sched_change pattern helper.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
| -rw-r--r-- | kernel/sched/core.c | 26 |
1 file changed, 5 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 638bffd4c1a2..e932439ae6da 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2664,28 +2664,12 @@ void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx
 static void
 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
 {
-	struct rq *rq = task_rq(p);
-	bool queued, running;
-
-	lockdep_assert_held(&p->pi_lock);
-	lockdep_assert_rq_held(rq);
-
-	queued = task_on_rq_queued(p);
-	running = task_current_donor(rq, p);
-
-	if (queued)
-		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
+	u32 flags = DEQUEUE_SAVE | DEQUEUE_NOCLOCK;
 
-	if (running)
-		put_prev_task(rq, p);
-
-	p->sched_class->set_cpus_allowed(p, ctx);
-	mm_set_cpus_allowed(p->mm, ctx->new_mask);
-
-	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
-	if (running)
-		set_next_task(rq, p);
+	scoped_guard (sched_change, p, flags) {
+		p->sched_class->set_cpus_allowed(p, ctx);
+		mm_set_cpus_allowed(p->mm, ctx->new_mask);
+	}
 }
 
 /*
