Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/deadline.c  | 18
-rw-r--r--  kernel/sched/debug.c     |  6
-rw-r--r--  kernel/sched/ext.c       |  4
-rw-r--r--  kernel/sched/topology.c  |  2
4 files changed, 20 insertions, 10 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e2d51f4306b3..f25301267e47 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1496,10 +1496,12 @@ throttle:
 		}
 
 		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
-			if (dl_server(dl_se))
-				enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
-			else
+			if (dl_server(dl_se)) {
+				replenish_dl_new_period(dl_se, rq);
+				start_dl_timer(dl_se);
+			} else {
 				enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
+			}
 		}
 
 		if (!is_leftmost(dl_se, &rq->dl))
@@ -1611,7 +1613,7 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
 static bool dl_server_stopped(struct sched_dl_entity *dl_se)
 {
 	if (!dl_se->dl_server_active)
-		return false;
+		return true;
 
 	if (dl_se->dl_server_idle) {
 		dl_server_stop(dl_se);
@@ -1849,7 +1851,9 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	u64 deadline = dl_se->deadline;
 
 	dl_rq->dl_nr_running++;
-	add_nr_running(rq_of_dl_rq(dl_rq), 1);
+
+	if (!dl_server(dl_se))
+		add_nr_running(rq_of_dl_rq(dl_rq), 1);
 
 	inc_dl_deadline(dl_rq, deadline);
 }
@@ -1859,7 +1863,9 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
-	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
+
+	if (!dl_server(dl_se))
+		sub_nr_running(rq_of_dl_rq(dl_rq), 1);
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 }
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 3f06ab84d53f..02e16b70a790 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -376,10 +376,8 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu
 			return -EINVAL;
 		}
 
-		if (rq->cfs.h_nr_queued) {
-			update_rq_clock(rq);
-			dl_server_stop(&rq->fair_server);
-		}
+		update_rq_clock(rq);
+		dl_server_stop(&rq->fair_server);
 
 		retval = dl_server_apply_params(&rq->fair_server, runtime, period, 0);
 		if (retval)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7dedc9a16281..4ae32ef179dd 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -5749,6 +5749,9 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 			__setscheduler_class(p->policy, p->prio);
 		struct sched_enq_and_set_ctx ctx;
 
+		if (!tryget_task_struct(p))
+			continue;
+
 		if (old_class != new_class && p->se.sched_delayed)
 			dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
 
@@ -5761,6 +5764,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 		sched_enq_and_set_task(&ctx);
 
 		check_class_changed(task_rq(p), p, old_class, p->prio);
+		put_task_struct(p);
 	}
 	scx_task_iter_stop(&sti);
 	percpu_up_write(&scx_fork_rwsem);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 977e133bb8a4..6e2f54169e66 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2201,6 +2201,8 @@ int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
 		goto unlock;
 
 	hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
+	if (!hop_masks)
+		goto unlock;
 	hop = hop_masks - k.masks;
 
 	ret = hop ?