author     Vincent Guittot <vincent.guittot@linaro.org>	2018-06-28 17:45:07 +0200
committer  Ingo Molnar <mingo@kernel.org>	2018-07-15 23:51:20 +0200
commit     3727e0e16340cbdf83818f5bf0113505c6876057 (patch)
tree       64faaa3a8d79c83a2929f6c50af75f0005c4d021 /kernel/sched/pelt.c
parent     3ae117c6cd7c4783819a0766aa97b9493a8a0f62 (diff)
sched/dl: Add dl_rq utilization tracking
Similarly to what happens with RT tasks, CFS tasks can be preempted by DL tasks, and the CFS utilization may then no longer describe the real utilization level. The current DL bandwidth reflects the requirements to meet deadlines when tasks are enqueued, but not the current utilization of the DL sched class. We therefore track DL class utilization to estimate the system utilization.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Cc: viresh.kumar@linaro.org
Link: http://lkml.kernel.org/r/1530200714-4504-5-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
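The motivation, in code terms: once rq->avg_dl is maintained by PELT, a utilization consumer can sum the per-class signals instead of relying on the DL bandwidth alone. Below is a minimal sketch of such an aggregation; cpu_total_util() is a hypothetical helper (not part of this patch), and the rq->avg_rt / rq->avg_dl fields are the ones added elsewhere in this patch series (the diff on this page is limited to pelt.c).

/*
 * Hypothetical helper, illustration only: estimate how busy a CPU is by
 * summing the per-class PELT utilization signals. The name and the
 * clamping policy are assumptions, not part of this commit.
 */
static unsigned long cpu_total_util(struct rq *rq)
{
	unsigned long util;

	util  = READ_ONCE(rq->cfs.avg.util_avg);	/* CFS tasks */
	util += READ_ONCE(rq->avg_rt.util_avg);		/* RT tasks (earlier patch in the series) */
	util += READ_ONCE(rq->avg_dl.util_avg);		/* DL tasks (this series) */

	/* Utilization cannot meaningfully exceed the CPU's capacity. */
	return min(util, (unsigned long)SCHED_CAPACITY_SCALE);
}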
Diffstat (limited to 'kernel/sched/pelt.c')
-rw-r--r--  kernel/sched/pelt.c | 23
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index a00b1ba3dd5b..8b78b6320cda 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -334,3 +334,26 @@ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
 	return 0;
 }
+
+/*
+ * dl_rq:
+ *
+ * util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ * util_sum = cpu_scale * load_sum
+ * runnable_load_sum = load_sum
+ *
+ */
+
+int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+	if (___update_load_sum(now, rq->cpu, &rq->avg_dl,
+				running,
+				running,
+				running)) {
+
+		___update_load_avg(&rq->avg_dl, 1, 1);
+		return 1;
+	}
+
+	return 0;
+}
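
The call sites for update_dl_rq_load_avg() live outside pelt.c (the diffstat above is limited to this file). A hedged sketch of how the hook would typically be driven, modelled on how the existing update_rt_rq_load_avg() is used, follows; the wrapper function is purely illustrative and not part of the patch.

/*
 * Illustration only: the real callers are in other kernel/sched/ files.
 * running=1 accrues utilization while a DL task executes on the CPU;
 * running=0 merely decays the blocked contribution.
 */
static void dl_pelt_update_example(struct rq *rq, int dl_task_running)
{
	if (update_dl_rq_load_avg(rq_clock_task(rq), rq, dl_task_running)) {
		/* The signal changed; rq->avg_dl.util_avg holds the new value. */
		trace_printk("dl util_avg now %lu\n",
			     READ_ONCE(rq->avg_dl.util_avg));
	}
}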