From c976a862ba4869c1e75c39b9b8f1e9ebfe90cdfc Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Thu, 26 Apr 2018 16:00:51 +0530
Subject: sched/fair: Avoid calling sync_entity_load_avg() unnecessarily

Call sync_entity_load_avg() directly from find_idlest_cpu() instead of
select_task_rq_fair(), as that is where the task's utilization value is
actually needed. Also, call sync_entity_load_avg() only after making
sure the sched domain spans at least one of the CPUs allowed for the
task.

Signed-off-by: Viresh Kumar
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Vincent Guittot
Link: http://lkml.kernel.org/r/cd019d1753824c81130eae7b43e2bbcec47cc1ad.1524738578.git.viresh.kumar@linaro.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

(limited to 'kernel/sched')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4b346f358005..1f6a23a5b451 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6199,6 +6199,13 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 	if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
 		return prev_cpu;
 
+	/*
+	 * We need task's util for capacity_spare_wake, sync it up to prev_cpu's
+	 * last_update_time.
+	 */
+	if (!(sd_flag & SD_BALANCE_FORK))
+		sync_entity_load_avg(&p->se);
+
 	while (sd) {
 		struct sched_group *group;
 		struct sched_domain *tmp;
@@ -6651,15 +6658,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 
 	if (unlikely(sd)) {
 		/* Slow path */
-
-		/*
-		 * We're going to need the task's util for capacity_spare_wake
-		 * in find_idlest_group. Sync it up to prev_cpu's
-		 * last_update_time.
-		 */
-		if (!(sd_flag & SD_BALANCE_FORK))
-			sync_entity_load_avg(&p->se);
-
 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
 	} else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
 		/* Fast path */
--
cgit