author	Peter Zijlstra <peterz@infradead.org>	2017-05-06 16:11:34 +0200
committer	Ingo Molnar <mingo@kernel.org>	2017-09-29 19:35:14 +0200
commit	840c5abca499a858619954dbcffc82110bb6e076 (patch)
tree	26da2483cf2f0fcdb7446595b311a2d78f85f68f /kernel/sched
parent	8d5b9025f9b4500f828260dc62e8ffa823ce0d59 (diff)
sched/fair: More accurate reweight_entity()
When a (group) entity changes its weight we should instantly change its
load_avg and propagate that change into the sums it is part of, because we
use these values to predict future behaviour and are not interested in the
historical value.

Without this change, the change in load would need to propagate through the
average, by which time it could again have changed, etc., always chasing
itself.

With this change, the cfs_rq load_avg sum will more accurately reflect the
current runnable and expected return of blocked load.

Reported-by: Paul Turner <pjt@google.com>
[josef: compile fix !SMP || !FAIR_GROUP]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
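[Editor's note] A minimal standalone sketch of the idea, not kernel code: load_sum is accumulated in a weight-invariant way, so when the weight changes, load_avg can be recomputed from load_sum at once (and the cfs_rq sum fixed up) instead of waiting for the running average to converge. The struct layout, helper names and the simplified divider below are illustrative assumptions; LOAD_AVG_MAX is the kernel's maximum of the PELT geometric series.

/* sketch: instant reweight of a PELT-style average (illustrative only) */
#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_MAX	47742	/* max value of the PELT geometric series */

struct sched_avg {
	uint64_t load_sum;	/* weight-invariant accumulated runnable time */
	uint32_t period_contrib;/* remainder of the current 1024us period */
	unsigned long load_avg;	/* weight-scaled average */
};

struct entity {
	unsigned long weight;
	struct sched_avg avg;
};

struct rq_sums {
	unsigned long load_avg;	/* sum of the entities' load_avg */
};

/* Recompute load_avg from load_sum for the new weight and fix up the sum. */
static void reweight(struct rq_sums *rq, struct entity *se, unsigned long weight)
{
	uint64_t divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;

	rq->load_avg -= se->avg.load_avg;	/* dequeue old contribution */
	se->weight = weight;
	se->avg.load_avg = (unsigned long)((weight * se->avg.load_sum) / divider);
	rq->load_avg += se->avg.load_avg;	/* enqueue new contribution */
}

int main(void)
{
	struct entity se = { .weight = 1024,
			     .avg = { .load_sum = 40000, .period_contrib = 512 } };
	struct rq_sums rq = { 0 };

	/* initial load_avg for the current weight */
	se.avg.load_avg = (unsigned long)((se.weight * se.avg.load_sum) /
					  (LOAD_AVG_MAX - 1024 + se.avg.period_contrib));
	rq.load_avg = se.avg.load_avg;

	reweight(&rq, &se, 2048);	/* double the weight: load_avg doubles at once */
	printf("se load_avg=%lu, rq load_avg=%lu\n", se.avg.load_avg, rq.load_avg);
	return 0;
}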
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 654d8e3d6047..750ae4dbf812 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2886,12 +2886,22 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		if (cfs_rq->curr == se)
 			update_curr(cfs_rq);
 		account_entity_dequeue(cfs_rq, se);
+		dequeue_runnable_load_avg(cfs_rq, se);
 	}
+	dequeue_load_avg(cfs_rq, se);
 
 	update_load_set(&se->load, weight);
 
-	if (se->on_rq)
+#ifdef CONFIG_SMP
+	se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum,
+				   LOAD_AVG_MAX - 1024 + se->avg.period_contrib);
+#endif
+
+	enqueue_load_avg(cfs_rq, se);
+	if (se->on_rq) {
 		account_entity_enqueue(cfs_rq, se);
+		enqueue_runnable_load_avg(cfs_rq, se);
+	}
 }
 
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
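[Editor's note] A rough sanity check of the new scaling (my reading of the patch, not text from it): LOAD_AVG_MAX - 1024 + se->avg.period_contrib is the largest value load_sum can hold at that point in the PELT window, so for an always-runnable entity load_sum equals the divider and the new expression reduces to load_avg == se_weight(se); an entity runnable roughly half the time ends up with roughly half its weight. The rescale thus keeps the "fraction of time runnable" already encoded in load_sum while applying the new weight immediately, which is exactly the instant propagation the changelog describes.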