author	Wander Lairson Costa <wander@redhat.com>	2024-07-24 11:22:48 -0300
committer	Peter Zijlstra <peterz@infradead.org>	2024-12-02 12:01:31 +0100
commit	3a181f20fb4e9ad3c93ea6c71520c23826042629 (patch)
tree	946953dd430a3469f0d8e32939878aee51a99944
parent	53916d5fd3c0b658de3463439dd2b7ce765072cb (diff)
sched/deadline: Consolidate Timer Cancellation
After commit b58652db66c9 ("sched/deadline: Fix task_struct reference leak"), I identified additional calls to hrtimer_try_to_cancel that might also require a dl_server check. It remains unclear whether this omission was intentional or accidental in those contexts.

This patch consolidates the timer cancellation logic into dedicated functions, ensuring consistent behavior across all calls. Additionally, it reduces code duplication and improves overall code cleanliness.

Note the use of the __always_inline keyword. In some instances, we have a task_struct pointer, dereference the dl member, and then use the container_of macro to retrieve the task_struct pointer again. By inlining the code, the compiler can potentially optimize out this redundant round trip.

Signed-off-by: Wander Lairson Costa <wander@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lore.kernel.org/r/20240724142253.27145-3-wander@redhat.com
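For reference, hrtimer_try_to_cancel() returns 1 when a queued timer was cancelled before its callback ran (so the caller must drop the task reference the timer held), -1 when the callback is currently executing (the callback drops the reference itself), and 0 when the timer was not queued at all; the new cancel_dl_timer() helper in the diff below therefore drops the reference only in the first case, and only when the entity belongs to a real task rather than a dl_server. The minimal standalone sketch below illustrates the container_of round trip that __always_inline lets the compiler collapse. The struct definitions and the round_trip() helper are simplified stand-ins for illustration, while container_of and dl_task_of mirror the real kernel helpers:

#include <stddef.h>

/* Simplified stand-ins for the kernel structures; illustration only. */
struct sched_dl_entity { int flags; };
struct task_struct { struct sched_dl_entity dl; };

/* Same shape as the kernel's container_of(): recover the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirrors dl_task_of(): map a scheduling entity back to its task_struct. */
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

/*
 * A caller that already holds p passes &p->dl down; once the helper is
 * inlined, the compiler can prove dl_task_of(&p->dl) == p and fold away
 * the offset arithmetic -- the "redundant round trip" mentioned above.
 */
static inline struct task_struct *round_trip(struct task_struct *p)
{
	return dl_task_of(&p->dl);
}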
-rw-r--r--	kernel/sched/deadline.c	41
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 1c8b8381dd20..33b4646f8b24 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -342,6 +342,29 @@ static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_s
 	__add_rq_bw(new_bw, &rq->dl);
 }
 
+static __always_inline
+void cancel_dl_timer(struct sched_dl_entity *dl_se, struct hrtimer *timer)
+{
+	/*
+	 * If the timer callback was running (hrtimer_try_to_cancel == -1),
+	 * it will eventually call put_task_struct().
+	 */
+	if (hrtimer_try_to_cancel(timer) == 1 && !dl_server(dl_se))
+		put_task_struct(dl_task_of(dl_se));
+}
+
+static __always_inline
+void cancel_replenish_timer(struct sched_dl_entity *dl_se)
+{
+	cancel_dl_timer(dl_se, &dl_se->dl_timer);
+}
+
+static __always_inline
+void cancel_inactive_timer(struct sched_dl_entity *dl_se)
+{
+	cancel_dl_timer(dl_se, &dl_se->inactive_timer);
+}
+
 static void dl_change_utilization(struct task_struct *p, u64 new_bw)
 {
 	WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
@@ -495,10 +518,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
 		 * will not touch the rq's active utilization,
 		 * so we are still safe.
 		 */
-		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
-			if (!dl_server(dl_se))
-				put_task_struct(dl_task_of(dl_se));
-		}
+		cancel_inactive_timer(dl_se);
 	} else {
 		/*
 		 * Since "dl_non_contending" is not set, the
@@ -2113,13 +2133,8 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 			 * The replenish timer needs to be canceled. No
 			 * problem if it fires concurrently: boosted threads
 			 * are ignored in dl_task_timer().
-			 *
-			 * If the timer callback was running (hrtimer_try_to_cancel == -1),
-			 * it will eventually call put_task_struct().
 			 */
-			if (hrtimer_try_to_cancel(&p->dl.dl_timer) == 1 &&
-			    !dl_server(&p->dl))
-				put_task_struct(p);
+			cancel_replenish_timer(&p->dl);
 			p->dl.dl_throttled = 0;
 		}
 	} else if (!dl_prio(p->normal_prio)) {
@@ -2287,8 +2302,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
 		 * will not touch the rq's active utilization,
 		 * so we are still safe.
 		 */
-		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
-			put_task_struct(p);
+		cancel_inactive_timer(&p->dl);
 	}
 	sub_rq_bw(&p->dl, &rq->dl);
 	rq_unlock(rq, &rf);
@@ -3036,8 +3050,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
-	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
-		put_task_struct(p);
+	cancel_inactive_timer(&p->dl);
 
 	/*
 	 * In case a task is setscheduled to SCHED_DEADLINE we need to keep