From a030d7381d8b3adabde724e3077bb6cb32d1b3ee Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 24 May 2017 10:59:52 +0530 Subject: sched/fair: Call cpufreq update util handlers less frequently on UP For SMP systems, update_load_avg() calls the cpufreq update util handlers only for the top level cfs_rq (i.e. rq->cfs). But that is not the case for UP systems. update_load_avg() calls util handler for any cfs_rq for which it is called. This would result in way too many calls from the scheduler to the cpufreq governors when CONFIG_FAIR_GROUP_SCHED is enabled. Reduce the frequency of these calls by copying the behavior from the SMP case, i.e. Only call util handlers for the top level cfs_rq. Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Cc: linaro-kernel@lists.linaro.org Fixes: 536bd00cdbb7 ("sched/fair: Fix !CONFIG_SMP kernel cpufreq governor breakage") Link: http://lkml.kernel.org/r/6abf69a2107525885b616a2c1ec03d9c0946171c.1495603536.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c95880e216f6..139abf2ae2a5 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2790,6 +2790,29 @@ static inline void update_cfs_shares(struct sched_entity *se) } #endif /* CONFIG_FAIR_GROUP_SCHED */ +static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq) +{ + if (&this_rq()->cfs == cfs_rq) { + /* + * There are a few boundary cases this might miss but it should + * get called often enough that that should (hopefully) not be + * a real problem -- added to that it only calls on the local + * CPU, so if we enqueue remotely we'll miss an update, but + * the next tick/schedule should update. + * + * It will not get called when we go idle, because the idle + * thread is a different class (!fair), nor will the utilization + * number include things like RT tasks. + * + * As is, the util number is not freq-invariant (we'd have to + * implement arch_scale_freq_capacity() for that). + * + * See cpu_util(). + */ + cpufreq_update_util(rq_of(cfs_rq), 0); + } +} + #ifdef CONFIG_SMP /* * Approximate: @@ -3276,29 +3299,6 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {} #endif /* CONFIG_FAIR_GROUP_SCHED */ -static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq) -{ - if (&this_rq()->cfs == cfs_rq) { - /* - * There are a few boundary cases this might miss but it should - * get called often enough that that should (hopefully) not be - * a real problem -- added to that it only calls on the local - * CPU, so if we enqueue remotely we'll miss an update, but - * the next tick/schedule should update. - * - * It will not get called when we go idle, because the idle - * thread is a different class (!fair), nor will the utilization - * number include things like RT tasks. - * - * As is, the util number is not freq-invariant (we'd have to - * implement arch_scale_freq_capacity() for that). - * - * See cpu_util(). - */ - cpufreq_update_util(rq_of(cfs_rq), 0); - } -} - /* * Unsigned subtract and clamp on underflow. 
* @@ -3544,7 +3544,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) static inline void update_load_avg(struct sched_entity *se, int not_used1) { - cpufreq_update_util(rq_of(cfs_rq_of(se)), 0); + cfs_rq_util_change(cfs_rq_of(se)); } static inline void -- cgit From 5b713a3d949bd7f8c615a335a8c40297586bc1b1 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 24 May 2017 10:59:53 +0530 Subject: sched/core: Reuse put_prev_task() Reuse put_prev_task() instead of copying its implementation. Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Cc: linaro-kernel@lists.linaro.org Link: http://lkml.kernel.org/r/e2e50578223d05c5e90a9feb964fe1ec5d09a052.1495603536.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0869b20fba81..835a234d35e8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5438,7 +5438,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) */ next = pick_next_task(rq, &fake_task, rf); BUG_ON(!next); - next->sched_class->put_prev_task(rq, next); + put_prev_task(rq, next); /* * Rules for changing task_struct::cpus_allowed are holding -- cgit From c7132dd6f0e3b07bd4541cda9040897cc460d855 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 24 May 2017 10:59:54 +0530 Subject: sched/fair: Pass 'rq' to weighted_cpuload() weighted_cpuload() uses the CPU number passed to it to get a pointer to the runqueue. Almost all callers of weighted_cpuload() already have the rq pointer and can pass it directly to weighted_cpuload(). In some cases the callers actually obtain the CPU number by doing cpu_of(rq). It would be simpler to pass the rq to weighted_cpuload().
Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Cc: linaro-kernel@lists.linaro.org Link: http://lkml.kernel.org/r/b7720627e0576dc29b4ba3f9b6edbc913bb4f684.1495603536.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 139abf2ae2a5..27d425eccaad 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1378,7 +1378,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page, group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4; } -static unsigned long weighted_cpuload(const int cpu); +static unsigned long weighted_cpuload(struct rq *rq); static unsigned long source_load(int cpu, int type); static unsigned long target_load(int cpu, int type); static unsigned long capacity_of(int cpu); @@ -1409,7 +1409,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid) struct rq *rq = cpu_rq(cpu); ns->nr_running += rq->nr_running; - ns->load += weighted_cpuload(cpu); + ns->load += weighted_cpuload(rq); ns->compute_capacity += capacity_of(cpu); cpus++; @@ -5125,9 +5125,9 @@ static void cpu_load_update(struct rq *this_rq, unsigned long this_load, } /* Used instead of source_load when we know the type == 0 */ -static unsigned long weighted_cpuload(const int cpu) +static unsigned long weighted_cpuload(struct rq *rq) { - return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs); + return cfs_rq_runnable_load_avg(&rq->cfs); } #ifdef CONFIG_NO_HZ_COMMON @@ -5172,7 +5172,7 @@ static void cpu_load_update_idle(struct rq *this_rq) /* * bail if there's load or we're actually up-to-date. */ - if (weighted_cpuload(cpu_of(this_rq))) + if (weighted_cpuload(this_rq)) return; cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0); @@ -5193,7 +5193,7 @@ void cpu_load_update_nohz_start(void) * concurrently we'll exit nohz. And cpu_load write can race with * cpu_load_update_idle() but both updater would be writing the same. 
*/ - this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq)); + this_rq->cpu_load[0] = weighted_cpuload(this_rq); } /* @@ -5209,7 +5209,7 @@ void cpu_load_update_nohz_stop(void) if (curr_jiffies == this_rq->last_load_update_tick) return; - load = weighted_cpuload(cpu_of(this_rq)); + load = weighted_cpuload(this_rq); rq_lock(this_rq, &rf); update_rq_clock(this_rq); cpu_load_update_nohz(this_rq, curr_jiffies, load); @@ -5235,7 +5235,7 @@ static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load) */ void cpu_load_update_active(struct rq *this_rq) { - unsigned long load = weighted_cpuload(cpu_of(this_rq)); + unsigned long load = weighted_cpuload(this_rq); if (tick_nohz_tick_stopped()) cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load); @@ -5253,7 +5253,7 @@ void cpu_load_update_active(struct rq *this_rq) static unsigned long source_load(int cpu, int type) { struct rq *rq = cpu_rq(cpu); - unsigned long total = weighted_cpuload(cpu); + unsigned long total = weighted_cpuload(rq); if (type == 0 || !sched_feat(LB_BIAS)) return total; @@ -5268,7 +5268,7 @@ static unsigned long source_load(int cpu, int type) static unsigned long target_load(int cpu, int type) { struct rq *rq = cpu_rq(cpu); - unsigned long total = weighted_cpuload(cpu); + unsigned long total = weighted_cpuload(rq); if (type == 0 || !sched_feat(LB_BIAS)) return total; @@ -5290,7 +5290,7 @@ static unsigned long cpu_avg_load_per_task(int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running); - unsigned long load_avg = weighted_cpuload(cpu); + unsigned long load_avg = weighted_cpuload(rq); if (nr_running) return load_avg / nr_running; @@ -5550,7 +5550,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) shallowest_idle_cpu = i; } } else if (shallowest_idle_cpu == -1) { - load = weighted_cpuload(i); + load = weighted_cpuload(cpu_rq(i)); if (load < min_load || (load == min_load && i == this_cpu)) { min_load = load; least_loaded_cpu = i; @@ -7363,7 +7363,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, sgs->nr_numa_running += rq->nr_numa_running; sgs->nr_preferred_running += rq->nr_preferred_running; #endif - sgs->sum_weighted_load += weighted_cpuload(i); + sgs->sum_weighted_load += weighted_cpuload(rq); /* * No need to call idle_cpu() if nr_running is not 0 */ @@ -7892,7 +7892,7 @@ static struct rq *find_busiest_queue(struct lb_env *env, capacity = capacity_of(i); - wl = weighted_cpuload(i); + wl = weighted_cpuload(rq); /* * When comparing with imbalance, use weighted_cpuload() -- cgit From 9674f5cad22a590c865a330ce333026b9f9c078b Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 24 May 2017 10:59:55 +0530 Subject: sched/fair: Avoid checking cfs_rq->nr_running twice Rearrange pick_next_task_fair() a bit to avoid checking cfs_rq->nr_running twice for the case where FAIR_GROUP_SCHED is enabled and the previous task doesn't belong to the fair class. 
Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Cc: linaro-kernel@lists.linaro.org Link: http://lkml.kernel.org/r/000903ab3df3350943d3271c53615893a230dc95.1495603536.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 27d425eccaad..30fd196c0cde 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6187,10 +6187,10 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf int new_tasks; again: -#ifdef CONFIG_FAIR_GROUP_SCHED if (!cfs_rq->nr_running) goto idle; +#ifdef CONFIG_FAIR_GROUP_SCHED if (prev->sched_class != &fair_sched_class) goto simple; @@ -6220,11 +6220,17 @@ again: /* * This call to check_cfs_rq_runtime() will do the * throttle and dequeue its entity in the parent(s). - * Therefore the 'simple' nr_running test will indeed + * Therefore the nr_running test will indeed * be correct. */ - if (unlikely(check_cfs_rq_runtime(cfs_rq))) + if (unlikely(check_cfs_rq_runtime(cfs_rq))) { + cfs_rq = &rq->cfs; + + if (!cfs_rq->nr_running) + goto idle; + goto simple; + } } se = pick_next_entity(cfs_rq, curr); @@ -6264,12 +6270,8 @@ again: return p; simple: - cfs_rq = &rq->cfs; #endif - if (!cfs_rq->nr_running) - goto idle; - put_prev_task(rq, prev); do { -- cgit From 3a123bbbb10d54dbdde6ccbbd519c74c91ba2f52 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 24 May 2017 10:59:56 +0530 Subject: sched/fair: Drop always true parameter of update_cfs_rq_load_avg() update_freq is always true and there is no need to pass it to update_cfs_rq_load_avg(). Remove it. Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Cc: linaro-kernel@lists.linaro.org Link: http://lkml.kernel.org/r/2d28d295f3f591ede7e931462bce1bda5aaa4896.1495603536.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 30fd196c0cde..75c58c77450a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -806,7 +806,7 @@ void post_init_entity_util_avg(struct sched_entity *se) /* * For !fair tasks do: * - update_cfs_rq_load_avg(now, cfs_rq, false); + update_cfs_rq_load_avg(now, cfs_rq); attach_entity_load_avg(cfs_rq, se); switched_from_fair(rq, p); * @@ -3320,7 +3320,6 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {} * update_cfs_rq_load_avg - update the cfs_rq's load/util averages * @now: current time, as per cfs_rq_clock_task() * @cfs_rq: cfs_rq to update - * @update_freq: should we call cfs_rq_util_change() or will the call do so * * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) * avg. The immediate corollary is that all (fair) tasks must be attached, see @@ -3334,7 +3333,7 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {} * call update_tg_load_avg() when this function returns true. 
*/ static inline int -update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) +update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) { struct sched_avg *sa = &cfs_rq->avg; int decayed, removed_load = 0, removed_util = 0; @@ -3362,7 +3361,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) cfs_rq->load_last_update_time_copy = sa->last_update_time; #endif - if (update_freq && (decayed || removed_util)) + if (decayed || removed_util) cfs_rq_util_change(cfs_rq); return decayed || removed_load; @@ -3390,7 +3389,7 @@ static inline void update_load_avg(struct sched_entity *se, int flags) if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) __update_load_avg_se(now, cpu, cfs_rq, se); - decayed = update_cfs_rq_load_avg(now, cfs_rq, true); + decayed = update_cfs_rq_load_avg(now, cfs_rq); decayed |= propagate_entity_load_avg(se); if (decayed && (flags & UPDATE_TG)) @@ -3534,7 +3533,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf); #else /* CONFIG_SMP */ static inline int -update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) +update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) { return 0; } @@ -6919,7 +6918,7 @@ static void update_blocked_averages(int cpu) if (throttled_hierarchy(cfs_rq)) continue; - if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true)) + if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq)) update_tg_load_avg(cfs_rq, 0); /* Propagate pending load changes to the parent, if any: */ @@ -6992,7 +6991,7 @@ static inline void update_blocked_averages(int cpu) rq_lock_irqsave(rq, &rf); update_rq_clock(rq); - update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true); + update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); rq_unlock_irqrestore(rq, &rf); } -- cgit From 4d13a06d54c415238325b0fe2c14f1052da4512f Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 13 Apr 2017 14:45:48 +0530 Subject: sched/topology: Drop memset() from init_rootdomain() There are only two callers of init_rootdomain(). One of them passes a global root-domain to it and the other one passes a dynamically allocated root-domain. There is no need to memset the root-domain in the first case, as the structure is already zeroed. Update alloc_rootdomain() to allocate the memory with kzalloc() and remove the memset() call from init_rootdomain().
Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Cc: linaro-kernel@lists.linaro.org Link: http://lkml.kernel.org/r/fc2f6cc90b098040970c85a97046512572d765bc.1492065513.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/topology.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 79895aec281e..216fee014b32 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -261,8 +261,6 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) static int init_rootdomain(struct root_domain *rd) { - memset(rd, 0, sizeof(*rd)); - if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) goto out; if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) @@ -311,7 +309,7 @@ static struct root_domain *alloc_rootdomain(void) { struct root_domain *rd; - rd = kmalloc(sizeof(*rd), GFP_KERNEL); + rd = kzalloc(sizeof(*rd), GFP_KERNEL); if (!rd) return NULL; -- cgit From 42d394d41ab90f4ed9d7a7403ed22e8f7590948a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 13 Apr 2017 14:45:49 +0530 Subject: sched/deadline: Don't re-initialize 'struct cpudl' The 'struct cpudl' passed to cpudl_init() is already initialized to zero. Don't do that again. Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Cc: linaro-kernel@lists.linaro.org Link: http://lkml.kernel.org/r/bd4c229806bc96694b15546207afcc221387d2f5.1492065513.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/cpudeadline.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index fba235c7d026..bdf448b6556f 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -246,7 +246,6 @@ int cpudl_init(struct cpudl *cp) { int i; - memset(cp, 0, sizeof(*cp)); raw_spin_lock_init(&cp->lock); cp->size = 0; -- cgit From 1c2a4861dbfca373fea1ff2cf9e9793933d024ce Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 13 Apr 2017 14:45:50 +0530 Subject: sched/cpupri: Don't re-initialize 'struct cpupri' The 'struct cpupri' passed to cpupri_init() is already initialized to zero. Don't do that again. Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Cc: linaro-kernel@lists.linaro.org Link: http://lkml.kernel.org/r/8a71d48c5a077500b6ddc1a41484c0ac8d3aad94.1492065513.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/cpupri.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 981fcd7dc394..2511aba36b89 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -209,8 +209,6 @@ int cpupri_init(struct cpupri *cp) { int i; - memset(cp, 0, sizeof(*cp)); - for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { struct cpupri_vec *vec = &cp->pri_to_cpu[i]; -- cgit From 181a80d1f7f453f58c4b47f89084d0849632858c Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 27 Apr 2017 13:58:59 +0530 Subject: sched: Mark pick_next_task_dl() and build_sched_domain() as static pick_next_task_dl() and build_sched_domain() aren't used outside deadline.c and topology.c. Make them static. 
Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Cc: linaro-kernel@lists.linaro.org Link: http://lkml.kernel.org/r/36e4cbb6210002cadae89920ae97e19e7e513008.1493281605.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/deadline.c | 2 +- kernel/sched/topology.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 755bd3f1a1a9..a205ac7cf435 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1655,7 +1655,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, return rb_entry(left, struct sched_dl_entity, rb_node); } -struct task_struct * +static struct task_struct * pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { struct sched_dl_entity *dl_se; diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 216fee014b32..bd8b6d6f5387 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1593,7 +1593,7 @@ static void __sdt_free(const struct cpumask *cpu_map) } } -struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, +static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, struct sched_domain_attr *attr, struct sched_domain *child, int cpu) { -- cgit From f235a54f00449c611f85173fe8a66c4d189c5ce1 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Sat, 1 Jul 2017 07:06:13 +0200 Subject: sched/pelt: Fix false running accounting The running state is a subset of the runnable state, which means that running can't be set if runnable (weight) is cleared. There are corner cases where the current sched_entity has already been dequeued but cfs_rq->curr has not been updated yet and still points to the dequeued sched_entity. If ___update_load_avg() is called at that time, weight will be 0 and running will be set, which is not possible. This case happens during pick_next_task_fair() when a cfs_rq becomes idle. The current sched_entity has been dequeued, so se->on_rq is cleared and cfs_rq->weight is null. But cfs_rq->curr still points to se (it will be cleared when picking the idle thread). Because the cfs_rq becomes idle, idle_balance() is called and ends up calling update_blocked_averages() with these wrong running and runnable states. Add a test in ___update_load_avg() to correct the running state in this case. Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Morten.Rasmussen@arm.com Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Link: http://lkml.kernel.org/r/1498885573-18984-1-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 75c58c77450a..ef5b66b110f8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2990,6 +2990,18 @@ ___update_load_avg(u64 now, int cpu, struct sched_avg *sa, sa->last_update_time += delta << 10; + /* + * running is a subset of runnable (weight) so running can't be set if + * runnable is clear. But there are some corner cases where the current + * se has been already dequeued but cfs_rq->curr still points to it. + * This means that weight will be 0 but not running for a sched_entity + * but also for a cfs_rq if the latter becomes idle.
As an example, + * this happens during idle_balance() which calls + * update_blocked_averages() + */ + if (!weight) + running = 0; + /* * Now we know we crossed measurement unit boundaries. The *_avg * accrues by two steps: -- cgit From 37ec97deb3a8c68a7adfab61beb261ffeab19d09 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Mon, 31 Jul 2017 15:28:46 -0400 Subject: sched/numa: Slow down scan rate if shared faults dominate The comment above update_task_scan_period() says the scan period should be increased (scanning slows down) if the majority of memory accesses are on the local node, or if the majority of the page accesses are shared with other tasks. However, with the current code, all a high ratio of shared accesses does is slow down the rate at which scanning is made faster. This patch changes things so either lots of shared accesses or lots of local accesses will slow down scanning, and numa scanning is sped up only when there are lots of private faults on remote memory pages. Signed-off-by: Rik van Riel Signed-off-by: Peter Zijlstra (Intel) Acked-by: Mel Gorman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: jhladky@redhat.com Cc: lvenanci@redhat.com Link: http://lkml.kernel.org/r/20170731192847.23050-2-riel@redhat.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ef5b66b110f8..cb6b7c83b74d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1892,7 +1892,7 @@ static void update_task_scan_period(struct task_struct *p, unsigned long shared, unsigned long private) { unsigned int period_slot; - int ratio; + int lr_ratio, ps_ratio; int diff; unsigned long remote = p->numa_faults_locality[0]; @@ -1922,25 +1922,36 @@ static void update_task_scan_period(struct task_struct *p, * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower) */ period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); - ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); - if (ratio >= NUMA_PERIOD_THRESHOLD) { - int slot = ratio - NUMA_PERIOD_THRESHOLD; + lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); + ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared); + + if (ps_ratio >= NUMA_PERIOD_THRESHOLD) { + /* + * Most memory accesses are local. There is no need to + * do fast NUMA scanning, since memory is already local. + */ + int slot = ps_ratio - NUMA_PERIOD_THRESHOLD; + if (!slot) + slot = 1; + diff = slot * period_slot; + } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) { + /* + * Most memory accesses are shared with other tasks. + * There is no point in continuing fast NUMA scanning, + * since other tasks may just move the memory elsewhere. + */ + int slot = lr_ratio - NUMA_PERIOD_THRESHOLD; if (!slot) slot = 1; diff = slot * period_slot; } else { - diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; - /* - * Scale scan rate increases based on sharing. There is an - * inverse relationship between the degree of sharing and - * the adjustment made to the scanning period. Broadly - * speaking the intent is that there is little point - * scanning faster if shared accesses dominate as it may - * simply bounce migrations uselessly + * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS, + * yet they are not on the local NUMA node. Speed up + * NUMA scanning to get the memory moved over. 
*/ - ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1)); - diff = (diff * ratio) / NUMA_PERIOD_SLOTS; + int ratio = max(lr_ratio, ps_ratio); + diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot; } p->numa_scan_period = clamp(p->numa_scan_period + diff, -- cgit From b5dd77c8bdada7b6262d0cba02a6ed525bf4e6e1 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Mon, 31 Jul 2017 15:28:47 -0400 Subject: sched/numa: Scale scan period with tasks in group and shared/private Running 80 tasks in the same group, or as threads of the same process, results in the memory getting scanned 80x as fast as it would be if a single task was using the memory. This really hurts some workloads. Scale the scan period by the number of tasks in the numa group, and the shared / private ratio, so the average rate at which memory in the group is scanned corresponds roughly to the rate at which a single task would scan its memory. Signed-off-by: Rik van Riel Signed-off-by: Peter Zijlstra (Intel) Acked-by: Mel Gorman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: jhladky@redhat.com Cc: lvenanci@redhat.com Link: http://lkml.kernel.org/r/20170731192847.23050-3-riel@redhat.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 111 ++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 86 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index cb6b7c83b74d..a7f1c3b797f8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1071,6 +1071,29 @@ unsigned int sysctl_numa_balancing_scan_size = 256; /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */ unsigned int sysctl_numa_balancing_scan_delay = 1000; +struct numa_group { + atomic_t refcount; + + spinlock_t lock; /* nr_tasks, tasks */ + int nr_tasks; + pid_t gid; + int active_nodes; + + struct rcu_head rcu; + unsigned long total_faults; + unsigned long max_faults_cpu; + /* + * Faults_cpu is used to decide whether memory should move + * towards the CPU. As a consequence, these stats are weighted + * more by CPU use than by memory faults. + */ + unsigned long *faults_cpu; + unsigned long faults[0]; +}; + +static inline unsigned long group_faults_priv(struct numa_group *ng); +static inline unsigned long group_faults_shared(struct numa_group *ng); + static unsigned int task_nr_scan_windows(struct task_struct *p) { unsigned long rss = 0; @@ -1107,13 +1130,47 @@ static unsigned int task_scan_min(struct task_struct *p) return max_t(unsigned int, floor, scan); } +static unsigned int task_scan_start(struct task_struct *p) +{ + unsigned long smin = task_scan_min(p); + unsigned long period = smin; + + /* Scale the maximum scan period with the amount of shared memory. */ + if (p->numa_group) { + struct numa_group *ng = p->numa_group; + unsigned long shared = group_faults_shared(ng); + unsigned long private = group_faults_priv(ng); + + period *= atomic_read(&ng->refcount); + period *= shared + 1; + period /= private + shared + 1; + } + + return max(smin, period); +} + static unsigned int task_scan_max(struct task_struct *p) { - unsigned int smin = task_scan_min(p); - unsigned int smax; + unsigned long smin = task_scan_min(p); + unsigned long smax; /* Watch for min being lower than max due to floor calculations */ smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); + + /* Scale the maximum scan period with the amount of shared memory. 
*/ + if (p->numa_group) { + struct numa_group *ng = p->numa_group; + unsigned long shared = group_faults_shared(ng); + unsigned long private = group_faults_priv(ng); + unsigned long period = smax; + + period *= atomic_read(&ng->refcount); + period *= shared + 1; + period /= private + shared + 1; + + smax = max(smax, period); + } + return max(smin, smax); } @@ -1129,26 +1186,6 @@ static void account_numa_dequeue(struct rq *rq, struct task_struct *p) rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); } -struct numa_group { - atomic_t refcount; - - spinlock_t lock; /* nr_tasks, tasks */ - int nr_tasks; - pid_t gid; - int active_nodes; - - struct rcu_head rcu; - unsigned long total_faults; - unsigned long max_faults_cpu; - /* - * Faults_cpu is used to decide whether memory should move - * towards the CPU. As a consequence, these stats are weighted - * more by CPU use than by memory faults. - */ - unsigned long *faults_cpu; - unsigned long faults[0]; -}; - /* Shared or private faults. */ #define NR_NUMA_HINT_FAULT_TYPES 2 @@ -1198,6 +1235,30 @@ static inline unsigned long group_faults_cpu(struct numa_group *group, int nid) group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)]; } +static inline unsigned long group_faults_priv(struct numa_group *ng) +{ + unsigned long faults = 0; + int node; + + for_each_online_node(node) { + faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)]; + } + + return faults; +} + +static inline unsigned long group_faults_shared(struct numa_group *ng) +{ + unsigned long faults = 0; + int node; + + for_each_online_node(node) { + faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)]; + } + + return faults; +} + /* * A node triggering more than 1/3 as many NUMA faults as the maximum is * considered part of a numa group's pseudo-interleaving set. Migrations @@ -1808,7 +1869,7 @@ static int task_numa_migrate(struct task_struct *p) * Reset the scan period if the task is being rescheduled on an * alternative node to recheck if the tasks is now properly placed. */ - p->numa_scan_period = task_scan_min(p); + p->numa_scan_period = task_scan_start(p); if (env.best_task == NULL) { ret = migrate_task_to(p, env.best_cpu); @@ -2459,7 +2520,7 @@ void task_numa_work(struct callback_head *work) if (p->numa_scan_period == 0) { p->numa_scan_period_max = task_scan_max(p); - p->numa_scan_period = task_scan_min(p); + p->numa_scan_period = task_scan_start(p); } next_scan = now + msecs_to_jiffies(p->numa_scan_period); @@ -2587,7 +2648,7 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr) if (now > curr->node_stamp + period) { if (!curr->node_stamp) - curr->numa_scan_period = task_scan_min(curr); + curr->numa_scan_period = task_scan_start(curr); curr->node_stamp += period; if (!time_before(jiffies, curr->mm->numa_next_scan)) { -- cgit From b18c3ca11c20caa4a397baa9b893ebc4aaa4fe9f Mon Sep 17 00:00:00 2001 From: Byungchul Park Date: Tue, 23 May 2017 11:00:56 +0900 Subject: sched/deadline: Make find_later_rq() choose a closer CPU in topology When cpudl_find() returns any among free_cpus, the CPU might not be closer than others, considering sched domain. For example: this_cpu: 15 free_cpus: 0, 1,..., 14 (== later_mask) best_cpu: 0 topology: 0 --+ +--+ 1 --+ | +-- ... --+ 2 --+ | | +--+ | 3 --+ | ... ... 12 --+ | +--+ | 13 --+ | | +-- ... -+ 14 --+ | +--+ 15 --+ In this case, it would be best to select 14 since it's a free CPU and closest to 15 (this_cpu). However, currently the code selects 0 (best_cpu) even though that's just any among free_cpus. Fix it. 
This (re)aligns the deadline behaviour with the rt behaviour. Signed-off-by: Byungchul Park Signed-off-by: Peter Zijlstra (Intel) Cc: Cc: Cc: Cc: Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1495504859-10960-2-git-send-email-byungchul.park@lge.com Signed-off-by: Ingo Molnar --- kernel/sched/deadline.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index a205ac7cf435..ac07d7c3a978 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1798,7 +1798,7 @@ static int find_later_rq(struct task_struct *task) struct sched_domain *sd; struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl); int this_cpu = smp_processor_id(); - int best_cpu, cpu = task_cpu(task); + int cpu = task_cpu(task); /* Make sure the mask is initialized first */ if (unlikely(!later_mask)) @@ -1811,17 +1811,14 @@ static int find_later_rq(struct task_struct *task) * We have to consider system topology and task affinity * first, then we can look for a suitable cpu. */ - best_cpu = cpudl_find(&task_rq(task)->rd->cpudl, - task, later_mask); - if (best_cpu == -1) + if (cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask) == -1) return -1; /* - * If we are here, some target has been found, - * the most suitable of which is cached in best_cpu. - * This is, among the runqueues where the current tasks - * have later deadlines than the task's one, the rq - * with the latest possible one. + * If we are here, some targets have been found, including + * the most suitable which is, among the runqueues where the + * current tasks have later deadlines than the task's one, the + * rq with the latest possible one. * * Now we check how well this matches with task's * affinity and system topology. @@ -1841,6 +1838,7 @@ static int find_later_rq(struct task_struct *task) rcu_read_lock(); for_each_domain(cpu, sd) { if (sd->flags & SD_WAKE_AFFINE) { + int best_cpu; /* * If possible, preempting this_cpu is @@ -1852,12 +1850,15 @@ static int find_later_rq(struct task_struct *task) return this_cpu; } + best_cpu = cpumask_first_and(later_mask, + sched_domain_span(sd)); /* - * Last chance: if best_cpu is valid and is - * in the mask, that becomes our choice. + * Last chance: if a cpu being in both later_mask + * and current sd span is valid, that becomes our + * choice. Of course, the latest possible cpu is + * already under consideration through later_mask. */ - if (best_cpu < nr_cpu_ids && - cpumask_test_cpu(best_cpu, sched_domain_span(sd))) { + if (best_cpu < nr_cpu_ids) { rcu_read_unlock(); return best_cpu; } -- cgit From 3261ed0b25098f92d36d5ad14524254d8c7fba54 Mon Sep 17 00:00:00 2001 From: Byungchul Park Date: Tue, 23 May 2017 11:00:57 +0900 Subject: sched/deadline: Change return value of cpudl_find() cpudl_find() users are only interested in knowing if suitable CPU(s) were found or not (and then they look at later_mask to know which). Change cpudl_find() return type accordingly. Aligns with rt code. 
Signed-off-by: Byungchul Park Signed-off-by: Peter Zijlstra (Intel) Cc: Cc: Cc: Cc: Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1495504859-10960-3-git-send-email-byungchul.park@lge.com Signed-off-by: Ingo Molnar --- kernel/sched/cpudeadline.c | 26 +++++++++++++------------- kernel/sched/deadline.c | 6 +++--- 2 files changed, 16 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index bdf448b6556f..8d9562d890d3 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -119,29 +119,29 @@ static inline int cpudl_maximum(struct cpudl *cp) * @p: the task * @later_mask: a mask to fill in with the selected CPUs (or NULL) * - * Returns: int - best CPU (heap maximum if suitable) + * Returns: int - CPUs were found */ int cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *later_mask) { - int best_cpu = -1; const struct sched_dl_entity *dl_se = &p->dl; if (later_mask && cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { - best_cpu = cpumask_any(later_mask); - goto out; - } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) && - dl_time_before(dl_se->deadline, cp->elements[0].dl)) { - best_cpu = cpudl_maximum(cp); - if (later_mask) - cpumask_set_cpu(best_cpu, later_mask); - } + return 1; + } else { + int best_cpu = cpudl_maximum(cp); + WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); -out: - WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); + if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) && + dl_time_before(dl_se->deadline, cp->elements[0].dl)) { + if (later_mask) + cpumask_set_cpu(best_cpu, later_mask); - return best_cpu; + return 1; + } + } + return 0; } /* diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index ac07d7c3a978..d05bd9457a40 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1594,7 +1594,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) * let's hope p can move out. */ if (rq->curr->nr_cpus_allowed == 1 || - cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) + !cpudl_find(&rq->rd->cpudl, rq->curr, NULL)) return; /* @@ -1602,7 +1602,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) * see if it is pushed or pulled somewhere else. */ if (p->nr_cpus_allowed != 1 && - cpudl_find(&rq->rd->cpudl, p, NULL) != -1) + cpudl_find(&rq->rd->cpudl, p, NULL)) return; resched_curr(rq); @@ -1811,7 +1811,7 @@ static int find_later_rq(struct task_struct *task) * We have to consider system topology and task affinity * first, then we can look for a suitable cpu. */ - if (cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask) == -1) + if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) return -1; /* -- cgit From 18f08dae19990f5fffde92e3a63e0d90cda0f1a8 Mon Sep 17 00:00:00 2001 From: Cheng Jian Date: Fri, 4 Aug 2017 17:19:37 +0800 Subject: sched/core: Remove unnecessary initialization init_idle_bootup_task() init_idle_bootup_task( ) is called in rest_init( ) to switch the scheduling class of the boot thread to the idle class. 
the function only sets: idle->sched_class = &idle_sched_class; which has been set in init_idle() called by sched_init(): /* * The idle tasks have their own, simple scheduling class: */ idle->sched_class = &idle_sched_class; We've already set the boot thread to idle class in start_kernel()->sched_init()->init_idle() so it's unnecessary to set it again in start_kernel()->rest_init()->init_idle_bootup_task() Signed-off-by: Cheng Jian Signed-off-by: Xie XiuQi Signed-off-by: Peter Zijlstra (Intel) Cc: Cc: Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1501838377-109720-1-git-send-email-cj.chengjian@huawei.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 835a234d35e8..6d91c10b1814 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5177,11 +5177,6 @@ void show_state_filter(unsigned long state_filter) debug_show_all_locks(); } -void init_idle_bootup_task(struct task_struct *idle) -{ - idle->sched_class = &idle_sched_class; -} - /** * init_idle - set up an idle thread for a given CPU * @idle: task in question -- cgit From 74dc3384fc7983b78cc46ebb1824968a3db85eb1 Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Sun, 6 Aug 2017 14:41:41 +1000 Subject: sched/debug: Use task_pid_nr_ns in /proc/$pid/sched It appears as though the addition of the PID namespace did not update the output code for /proc/*/sched, which resulted in it providing PIDs that were not self-consistent with the /proc mount. This additionally made it trivial to detect whether a process was inside &init_pid_ns from userspace, making container detection trivial: https://github.com/jessfraz/amicontained This leads to situations such as: % unshare -pmf % mount -t proc proc /proc % head -n1 /proc/1/sched head (10047, #threads: 1) Fix this by just using task_pid_nr_ns for the output of /proc/*/sched. All of the other uses of task_pid_nr in kernel/sched/debug.c are from a sysctl context and thus don't need to be namespaced. Signed-off-by: Aleksa Sarai Signed-off-by: Peter Zijlstra (Intel) Acked-by: Eric W. Biederman Cc: Jess Frazelle Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: cyphar@cyphar.com Link: http://lkml.kernel.org/r/20170806044141.5093-1-asarai@suse.com Signed-off-by: Ingo Molnar --- kernel/sched/debug.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 4fa66de52bd6..ac345115877b 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -872,11 +872,12 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m) #endif } -void proc_sched_show_task(struct task_struct *p, struct seq_file *m) +void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, + struct seq_file *m) { unsigned long nr_switches; - SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p), + SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns), get_nr_threads(p)); SEQ_printf(m, "---------------------------------------------------------" -- cgit From e8c164954b926f06f109a42fb8595ed01275b141 Mon Sep 17 00:00:00 2001 From: Xie XiuQi Date: Mon, 7 Aug 2017 16:44:22 +0800 Subject: sched/debug: Show task state in /proc/sched_debug Currently we print the runnable task in /proc/sched_debug, but there is no task state information. We don't know which task is in the runqueue and which task is sleeping. 
Add the task state to the runnable task list, like this: runnable tasks: S task PID tree-key switches prio wait-time sum-exec sum-sleep ----------------------------------------------------------------------------------------------------------- S watchdog/239 1452 -11.917445 2811 0 0.000000 8.949306 0.000000 7 0 / S migration/239 1453 20686.367740 8 0 0.000000 16215.720897 0.000000 7 0 / S ksoftirqd/239 1454 115383.841071 12 120 0.000000 0.200683 0.000000 7 0 / >R test 21287 4872.190970 407 120 0.000000 4874.911790 0.000000 7 0 /autogroup-150 R test 21288 4868.385454 401 120 0.000000 3672.341489 0.000000 7 0 /autogroup-150 R test 21289 4868.326776 384 120 0.000000 3424.934159 0.000000 7 0 /autogroup-150 Signed-off-by: Xie XiuQi Signed-off-by: Peter Zijlstra (Intel) Cc: Cc: Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1502095463-160172-2-git-send-email-xiexiuqi@huawei.com Signed-off-by: Ingo Molnar --- kernel/sched/debug.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index ac345115877b..d8d2ea215b85 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -421,13 +421,19 @@ static char *task_group_path(struct task_group *tg) } #endif +static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; + static void print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) { - if (rq->curr == p) - SEQ_printf(m, "R"); - else - SEQ_printf(m, " "); + unsigned long state; + + if (rq->curr == p) { + SEQ_printf(m, ">R"); + } else { + state = p->state ? __ffs(p->state) + 1 : 0; + SEQ_printf(m, " %c", state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); + } SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ", p->comm, task_pid_nr(p), @@ -456,9 +462,9 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) SEQ_printf(m, "\nrunnable tasks:\n" " S task PID tree-key switches prio" " wait-time sum-exec sum-sleep\n" "-------------------------------------------------------" "----------------------------------------------------\n"); rcu_read_lock(); -- cgit From 20435d84e5f2041c64c792399ab6f2948a2c2252 Mon Sep 17 00:00:00 2001 From: Xie XiuQi Date: Mon, 7 Aug 2017 16:44:23 +0800 Subject: sched/debug: Introduce task_state_to_char() helper function Now that we have more than one place to get the task state, introduce the task_state_to_char() helper function to save some code. No functionality changed.
Signed-off-by: Xie XiuQi Signed-off-by: Peter Zijlstra (Intel) Cc: Cc: Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1502095463-160172-3-git-send-email-xiexiuqi@huawei.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 15 ++++----------- kernel/sched/debug.c | 10 +++------- 2 files changed, 7 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6d91c10b1814..f9f9948e2470 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5103,24 +5103,17 @@ out_unlock: return retval; } -static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; - void sched_show_task(struct task_struct *p) { unsigned long free = 0; int ppid; - unsigned long state = p->state; - - /* Make sure the string lines up properly with the number of task states: */ - BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1); if (!try_get_task_stack(p)) return; - if (state) - state = __ffs(state) + 1; - printk(KERN_INFO "%-15.15s %c", p->comm, - state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); - if (state == TASK_RUNNING) + + printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p)); + + if (p->state == TASK_RUNNING) printk(KERN_CONT " running task "); #ifdef CONFIG_DEBUG_STACK_USAGE free = stack_not_used(p); diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index d8d2ea215b85..cfd84f79e075 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -426,14 +426,10 @@ static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; static void print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) { - unsigned long state; - - if (rq->curr == p) { + if (rq->curr == p) SEQ_printf(m, ">R"); - } else { - state = p->state ? __ffs(p->state) + 1 : 0; - SEQ_printf(m, " %c", state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); - } + else + SEQ_printf(m, " %c", task_state_to_char(p)); SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ", p->comm, task_pid_nr(p), -- cgit From 90001d67be2fa2acbe3510d1f64fa6533efa30ef Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 31 Jul 2017 17:50:05 +0200 Subject: sched/fair: Fix wake_affine() for !NUMA_BALANCING In commit: 3fed382b46ba ("sched/numa: Implement NUMA node level wake_affine()") Rik changed wake_affine() to consider NUMA information when balancing between LLC domains. There are a number of problems here which this patch tries to address: - LLC < NODE; in this case we'd use the wrong information to balance - !NUMA_BALANCING: in this case, the new code doesn't do any balancing at all - re-computes the NUMA data for every wakeup, which can mean iterating up to 64 CPUs for every wakeup. - default affine wakeups inside a cache We address these by saving the load/capacity values for each sched_domain during regular load-balance and using these values in wake_affine_llc(). The obvious down-side to using cached values is that they can be too old and poorly reflect reality. But this way we can use LLC wide information and thus not rely on assuming LLC matches NODE. We also don't rely on NUMA_BALANCING nor do we have to aggregate two nodes (or even cache domains) worth of CPUs for each wakeup. Signed-off-by: Peter Zijlstra (Intel) Cc: Josef Bacik Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Fixes: 3fed382b46ba ("sched/numa: Implement NUMA node level wake_affine()") [ Minor readability improvements.
] Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 190 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 122 insertions(+), 68 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a7f1c3b797f8..8d5868771cb3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2658,59 +2658,6 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr) } } -/* - * Can a task be moved from prev_cpu to this_cpu without causing a load - * imbalance that would trigger the load balancer? - */ -static inline bool numa_wake_affine(struct sched_domain *sd, - struct task_struct *p, int this_cpu, - int prev_cpu, int sync) -{ - struct numa_stats prev_load, this_load; - s64 this_eff_load, prev_eff_load; - - update_numa_stats(&prev_load, cpu_to_node(prev_cpu)); - update_numa_stats(&this_load, cpu_to_node(this_cpu)); - - /* - * If sync wakeup then subtract the (maximum possible) - * effect of the currently running task from the load - * of the current CPU: - */ - if (sync) { - unsigned long current_load = task_h_load(current); - - if (this_load.load > current_load) - this_load.load -= current_load; - else - this_load.load = 0; - } - - /* - * In low-load situations, where this_cpu's node is idle due to the - * sync cause above having dropped this_load.load to 0, move the task. - * Moving to an idle socket will not create a bad imbalance. - * - * Otherwise check if the nodes are near enough in load to allow this - * task to be woken on this_cpu's node. - */ - if (this_load.load > 0) { - unsigned long task_load = task_h_load(p); - - this_eff_load = 100; - this_eff_load *= prev_load.compute_capacity; - - prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; - prev_eff_load *= this_load.compute_capacity; - - this_eff_load *= this_load.load + task_load; - prev_eff_load *= prev_load.load - task_load; - - return this_eff_load <= prev_eff_load; - } - - return true; -} #else static void task_tick_numa(struct rq *rq, struct task_struct *curr) { @@ -2724,14 +2671,6 @@ static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) { } -#ifdef CONFIG_SMP -static inline bool numa_wake_affine(struct sched_domain *sd, - struct task_struct *p, int this_cpu, - int prev_cpu, int sync) -{ - return true; -} -#endif /* !SMP */ #endif /* CONFIG_NUMA_BALANCING */ static void @@ -5428,20 +5367,115 @@ static int wake_wide(struct task_struct *p) return 1; } +struct llc_stats { + unsigned long nr_running; + unsigned long load; + unsigned long capacity; + int has_capacity; +}; + +static bool get_llc_stats(struct llc_stats *stats, int cpu) +{ + struct sched_domain_shared *sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); + + if (!sds) + return false; + + stats->nr_running = READ_ONCE(sds->nr_running); + stats->load = READ_ONCE(sds->load); + stats->capacity = READ_ONCE(sds->capacity); + stats->has_capacity = stats->nr_running < per_cpu(sd_llc_size, cpu); + + return true; +} + +/* + * Can a task be moved from prev_cpu to this_cpu without causing a load + * imbalance that would trigger the load balancer? + * + * Since we're running on 'stale' values, we might in fact create an imbalance + * but recomputing these values is expensive, as that'd mean iteration 2 cache + * domains worth of CPUs. 
+ */ +static bool +wake_affine_llc(struct sched_domain *sd, struct task_struct *p, + int this_cpu, int prev_cpu, int sync) +{ + struct llc_stats prev_stats, this_stats; + s64 this_eff_load, prev_eff_load; + unsigned long task_load; + + if (!get_llc_stats(&prev_stats, prev_cpu) || + !get_llc_stats(&this_stats, this_cpu)) + return false; + + /* + * If sync wakeup then subtract the (maximum possible) + * effect of the currently running task from the load + * of the current LLC. + */ + if (sync) { + unsigned long current_load = task_h_load(current); + + /* in this case load hits 0 and this LLC is considered 'idle' */ + if (current_load > this_stats.load) + return true; + + this_stats.load -= current_load; + } + + /* + * The has_capacity stuff is not SMT aware, but by trying to balance + * the nr_running on both ends we try and fill the domain at equal + * rates, thereby first consuming cores before siblings. + */ + + /* if the old cache has capacity, stay there */ + if (prev_stats.has_capacity && prev_stats.nr_running < this_stats.nr_running+1) + return false; + + /* if this cache has capacity, come here */ + if (this_stats.has_capacity && this_stats.nr_running < prev_stats.nr_running+1) + return true; + + /* + * Check to see if we can move the load without causing too much + * imbalance. + */ + task_load = task_h_load(p); + + this_eff_load = 100; + this_eff_load *= prev_stats.capacity; + + prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; + prev_eff_load *= this_stats.capacity; + + this_eff_load *= this_stats.load + task_load; + prev_eff_load *= prev_stats.load - task_load; + + return this_eff_load <= prev_eff_load; +} + static int wake_affine(struct sched_domain *sd, struct task_struct *p, int prev_cpu, int sync) { int this_cpu = smp_processor_id(); - bool affine = false; + bool affine; /* - * Common case: CPUs are in the same socket, and select_idle_sibling() - * will do its thing regardless of what we return: + * Default to no affine wakeups; wake_affine() should not effect a task + * placement the load-balancer feels inclined to undo. The conservative + * option is therefore to not move tasks when they wake up. */ - if (cpus_share_cache(prev_cpu, this_cpu)) - affine = true; - else - affine = numa_wake_affine(sd, p, this_cpu, prev_cpu, sync); + affine = false; + + /* + * If the wakeup is across cache domains, try to evaluate if movement + * makes sense, otherwise rely on select_idle_siblings() to do + * placement inside the cache domain. 
*/ + if (!cpus_share_cache(prev_cpu, this_cpu)) + affine = wake_affine_llc(sd, p, this_cpu, prev_cpu, sync); schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); if (affine) { @@ -7121,6 +7155,7 @@ struct sg_lb_stats { struct sd_lb_stats { struct sched_group *busiest; /* Busiest group in this sd */ struct sched_group *local; /* Local group in this sd */ + unsigned long total_running; unsigned long total_load; /* Total load of all groups in sd */ unsigned long total_capacity; /* Total capacity of all groups in sd */ unsigned long avg_load; /* Average load across all groups in sd */ @@ -7140,6 +7175,7 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds) *sds = (struct sd_lb_stats){ .busiest = NULL, .local = NULL, + .total_running = 0UL, .total_load = 0UL, .total_capacity = 0UL, .busiest_stat = { @@ -7575,6 +7611,7 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq) */ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) { + struct sched_domain_shared *shared = env->sd->shared; struct sched_domain *child = env->sd->child; struct sched_group *sg = env->sd->groups; struct sg_lb_stats *local = &sds->local_stat; @@ -7631,6 +7668,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd next_group: /* Now, start updating sd_lb_stats */ + sds->total_running += sgs->sum_nr_running; sds->total_load += sgs->group_load; sds->total_capacity += sgs->group_capacity; @@ -7646,6 +7684,21 @@ next_group: env->dst_rq->rd->overload = overload; } + if (!shared) + return; + + /* + * Since these are sums over groups they can contain some CPUs + * multiple times for the NUMA domains. + * + * Currently only wake_affine_llc() and find_busiest_group() + * uses these numbers, only the last is affected by this problem. + * + * XXX fix that. + */ + WRITE_ONCE(shared->nr_running, sds->total_running); + WRITE_ONCE(shared->load, sds->total_load); + WRITE_ONCE(shared->capacity, sds->total_capacity); } /** @@ -7875,6 +7928,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) if (!sds.busiest || busiest->sum_nr_running == 0) goto out_balanced; + /* XXX broken for overlapping NUMA groups */ sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load) / sds.total_capacity; -- cgit From 1e58565e6d147751d85ee9e692d1226059b3318f Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 2 Aug 2017 14:13:00 +0530 Subject: sched/autogroup: Fix error reporting printk text in autogroup_create() It's kzalloc(), not kmalloc(), that has failed earlier. While here, remove a redundant empty line. Signed-off-by: Anshuman Khandual Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20170802084300.29487-1-khandual@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- kernel/sched/autogroup.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index da39489d2d80..de6d7f4dfcb5 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -71,7 +71,6 @@ static inline struct autogroup *autogroup_create(void) goto out_fail; tg = sched_create_group(&root_task_group); - if (IS_ERR(tg)) goto out_free; @@ -101,7 +100,7 @@ out_free: out_fail: if (printk_ratelimit()) { printk(KERN_WARNING "autogroup_create: %s failure.\n", - ag ? "sched_create_group()" : "kmalloc()"); + ag ?
"sched_create_group()" : "kzalloc()"); } return autogroup_kref_get(&autogroup_default); -- cgit From 9c8783201cb58e9af8ddeb0cc68f37b0a44ca16c Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 16 Aug 2017 13:12:02 -0400 Subject: sched/completion: Document that reinit_completion() must be called after complete_all() The complete_all() function modifies the completion's "done" variable to UINT_MAX, and no other caller (wait_for_completion(), etc) will modify it back to zero. That means that any call to complete_all() must have a reinit_completion() before that completion can be used again. Document this fact by the complete_all() function. Also document that completion_done() will always return true if complete_all() is called. Signed-off-by: Steven Rostedt (VMware) Acked-by: Linus Torvalds Cc: Andrew Morton Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170816131202.195c2f4b@gandalf.local.home Signed-off-by: Ingo Molnar --- kernel/sched/completion.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index 13fc5ae9bf2f..2950f446820d 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -47,6 +47,13 @@ EXPORT_SYMBOL(complete); * * It may be assumed that this function implies a write memory barrier before * changing the task state if and only if any tasks are woken up. + * + * Since complete_all() sets the completion of @x permanently to done + * to allow multiple waiters to finish, a call to reinit_completion() + * must be used on @x if @x is to be used again. The code must make + * sure that all waiters have woken and finished before reinitializing + * @x. Also note that the function completion_done() can not be used + * to know if there are still waiters after complete_all() has been called. */ void complete_all(struct completion *x) { @@ -297,6 +304,7 @@ EXPORT_SYMBOL(try_wait_for_completion); * Return: 0 if there are waiters (wait_for_completion() in progress) * 1 if there are no waiters. * + * Note, this will always return true if complete_all() was called on @X. */ bool completion_done(struct completion *x) { -- cgit From 213c5a459ae0a7ef0a092f576aae2d5db6819360 Mon Sep 17 00:00:00 2001 From: Shu Wang Date: Thu, 10 Aug 2017 15:52:16 +0800 Subject: sched/topology: Fix memory leak in __sdt_alloc() Found this issue by kmemleak: the 'sg' and 'sgc' pointers from __sdt_alloc() might be leaked as each domain holds many groups' ref, but in destroy_sched_domain(), it only declined the first group ref. Onlining and offlining a CPU can trigger this leak, and cause OOM. Reproducer for my 6 CPUs machine: while true do echo 0 > /sys/devices/system/cpu/cpu5/online; echo 1 > /sys/devices/system/cpu/cpu5/online; done unreferenced object 0xffff88007d772a80 (size 64): comm "cpuhp/5", pid 39, jiffies 4294719962 (age 35.251s) hex dump (first 32 bytes): c0 22 77 7d 00 88 ff ff 02 00 00 00 01 00 00 00 ."w}............ 40 2a 77 7d 00 88 ff ff 00 00 00 00 00 00 00 00 @*w}............ 
    backtrace:
      [] kmemleak_alloc+0x4a/0xa0
      [] __kmalloc_node+0xf1/0x280
      [] build_sched_domains+0x1e8/0xf20
      [] partition_sched_domains+0x304/0x360
      [] cpuset_update_active_cpus+0x17/0x40
      [] sched_cpu_activate+0xae/0xc0
      [] cpuhp_invoke_callback+0x90/0x400
      [] cpuhp_up_callbacks+0x37/0xb0
      [] cpuhp_thread_fun+0xd7/0xf0
      [] smpboot_thread_fn+0x110/0x160
      [] kthread+0x109/0x140
      [] ret_from_fork+0x25/0x30
      [] 0xffffffffffffffff

  unreferenced object 0xffff88007d772a40 (size 64):
    comm "cpuhp/5", pid 39, jiffies 4294719962 (age 35.251s)
    hex dump (first 32 bytes):
      03 00 00 00 00 00 00 00 00 04 00 00 00 00 00 00  ................
      00 04 00 00 00 00 00 00 4f 3c fc ff 00 00 00 00  ........O<......
    backtrace:
      [] kmemleak_alloc+0x4a/0xa0
      [] __kmalloc_node+0xf1/0x280
      [] build_sched_domains+0xead/0xf20
      [] partition_sched_domains+0x304/0x360
      [] cpuset_update_active_cpus+0x17/0x40
      [] sched_cpu_activate+0xae/0xc0
      [] cpuhp_invoke_callback+0x90/0x400
      [] cpuhp_up_callbacks+0x37/0xb0
      [] cpuhp_thread_fun+0xd7/0xf0
      [] smpboot_thread_fn+0x110/0x160
      [] kthread+0x109/0x140
      [] ret_from_fork+0x25/0x30
      [] 0xffffffffffffffff

Reported-by: Chunyu Hu
Signed-off-by: Shu Wang
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Chunyu Hu
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: liwang@redhat.com
Link: http://lkml.kernel.org/r/1502351536-9108-1-git-send-email-shuwang@redhat.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/topology.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index bd8b6d6f5387..4197f1346153 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -335,7 +335,8 @@ static void free_sched_groups(struct sched_group *sg, int free_sgc)
 		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
 			kfree(sg->sgc);
 
-		kfree(sg);
+		if (atomic_dec_and_test(&sg->ref))
+			kfree(sg);
 		sg = tmp;
 	} while (sg != first);
 }
@@ -343,15 +344,11 @@ static void free_sched_groups(struct sched_group *sg, int free_sgc)
 static void destroy_sched_domain(struct sched_domain *sd)
 {
 	/*
-	 * If its an overlapping domain it has private groups, iterate and
-	 * nuke them all.
+	 * A sched domain has many groups' reference, and an overlapping
+	 * domain has private groups, iterate and nuke them all.
 	 */
-	if (sd->flags & SD_OVERLAP) {
-		free_sched_groups(sd->groups, 1);
-	} else if (atomic_dec_and_test(&sd->groups->ref)) {
-		kfree(sd->groups->sgc);
-		kfree(sd->groups);
-	}
+	free_sched_groups(sd->groups, 1);
+
 	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
 		kfree(sd->shared);
 	kfree(sd);
@@ -668,6 +665,7 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
 	else
 		cpumask_copy(sg_span, sched_domain_span(sd));
 
+	atomic_inc(&sg->ref);
 	return sg;
 }
-- cgit

From a090c4f2cd2cfeb54eaf8ad1e726a6c485f3abc3 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 21 Aug 2017 15:42:52 +0200
Subject: sched/topology: Improve comments

Mike provided a better comment for destroy_sched_domain() ...
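
To make the "free only on the last reference drop" rule that free_sched_groups()
and destroy_sched_domain() now follow easier to see, here is a minimal,
self-contained sketch of the same pattern. It is an illustration only, not
kernel code: the struct and function names are invented, and C11 atomics stand
in for the kernel's atomic_dec_and_test().

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdlib.h>

  struct group {
          atomic_int ref;         /* how many domains still point at this group */
  };

  static void get_group(struct group *g)
  {
          atomic_fetch_add(&g->ref, 1);   /* analogous to atomic_inc(&sg->ref) */
  }

  /* Returns true when this call dropped the last reference and freed @g. */
  static bool put_group(struct group *g)
  {
          /* atomic_fetch_sub() returns the old value; 1 means we were last. */
          if (atomic_fetch_sub(&g->ref, 1) == 1) {
                  free(g);
                  return true;
          }
          return false;
  }

With this shape, every domain level that stores a pointer to a group takes its
own reference (the atomic_inc(&sg->ref) added in
build_group_from_child_sched_domain() above), and teardown can simply iterate
the group list and drop one reference per holder, freeing only when none remain.
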
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 kernel/sched/topology.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 4197f1346153..179b90b60ec6 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -344,8 +344,9 @@ static void free_sched_groups(struct sched_group *sg, int free_sgc)
 static void destroy_sched_domain(struct sched_domain *sd)
 {
 	/*
-	 * A sched domain has many groups' reference, and an overlapping
-	 * domain has private groups, iterate and nuke them all.
+	 * A normal sched domain may have multiple group references, an
+	 * overlapping domain, having private groups, only one. Iterate,
+	 * dropping group/capacity references, freeing where none remain.
 	 */
 	free_sched_groups(sd->groups, 1);
 
-- cgit

From 77d1dfda0e79b41894880418f04794e92e4350e2 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 8 Aug 2017 12:16:24 +0200
Subject: sched/topology, cpuset: Avoid spurious/wrong domain rebuilds

When disabling cpuset.sched_load_balance we expect to be able to online
CPUs without generating sched_domains. However this is currently
completely broken.

What happens is that we generate the sched_domains and then destroy
them. This is because of the spurious 'default' domain build in
cpuset_update_active_cpus(). That builds a single machine wide domain
and then schedules a work to build the 'real' domains. The work then
finds there are _no_ domains and destroys the lot again.

Furthermore, if there actually were cpusets, building the machine wide
domain is actively wrong, because it would allow tasks to 'escape' their
cpuset. Also I don't think it's needed, the scheduler really should
respect the active mask.

Reported-by: Ofer Levi(SW)
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Tejun Heo
Cc: Thomas Gleixner
Cc: Vineet.Gupta1@synopsys.com
Cc: rusty@rustcorp.com.au
Signed-off-by: Ingo Molnar
---
 kernel/cgroup/cpuset.c | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'kernel')

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 8d5151688504..1d2369451939 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2343,13 +2343,7 @@ void cpuset_update_active_cpus(void)
 	 * We're inside cpu hotplug critical region which usually nests
 	 * inside cgroup synchronization. Bounce actual hotplug processing
 	 * to a work item to avoid reverse locking order.
-	 *
-	 * We still need to do partition_sched_domains() synchronously;
-	 * otherwise, the scheduler will get confused and put tasks to the
-	 * dead CPU. Fall back to the default single domain.
-	 * cpuset_hotplug_workfn() will rebuild it as necessary.
 	 */
-	partition_sched_domains(1, NULL, NULL);
 	schedule_work(&cpuset_hotplug_work);
 }
-- cgit

From 09e0dd8e0f2e197690d34fed8cb4737114d3dd5f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 8 Aug 2017 12:16:24 +0200
Subject: sched/topology: Avoid pointless rebuild

Fix partition_sched_domains() to try and preserve the existing machine
wide domain instead of unconditionally destroying it. We do this by
attempting to allocate the new single domain, only when that fails do we
reuse the fallback_doms.

When using fallback_doms we need to first destroy and then recreate
because both the old and new could be backed by it.
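
The ordering constraint in that last paragraph is the subtle part. Below is a
simplified, hypothetical sketch of why the old set has to be destroyed before
the shared fallback storage is reused; none of these names are the kernel's,
and the real code manages arrays of cpumasks rather than a toy struct.

  #include <stdio.h>
  #include <stdlib.h>

  struct domset { char name[32]; };

  static struct domset fallback_dom;     /* static emergency storage */
  static struct domset *cur;             /* currently installed set */

  static void destroy(struct domset *d)
  {
          printf("destroying '%s'\n", d->name);
          if (d != &fallback_dom)
                  free(d);
  }

  static void install(const char *name)
  {
          struct domset *new = malloc(sizeof(*new));

          if (!new)
                  new = &fallback_dom;

          /*
           * If the outgoing and the incoming set are both backed by the
           * static fallback, tear the old one down *before* its storage
           * is overwritten with the new contents; afterwards is too late.
           */
          if (cur == &fallback_dom && new == &fallback_dom) {
                  destroy(cur);
                  cur = NULL;
          }

          snprintf(new->name, sizeof(new->name), "%s", name);

          if (cur)
                  destroy(cur);   /* cur != new here, safe to tear down */
          cur = new;
  }

  int main(void)
  {
          install("machine-wide");        /* normal case: freshly allocated */
          install("machine-wide-v2");     /* old heap-backed set is freed */
          return 0;
  }

When allocation succeeds the old and new sets never alias, so destroying the
old one after installing the new one is fine; only the fallback path needs the
destroy-first special case, which is what the patch below does with
fallback_doms.
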
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Ofer Levi(SW)
Cc: Peter Zijlstra
Cc: Tejun Heo
Cc: Thomas Gleixner
Cc: Vineet.Gupta1@synopsys.com
Cc: rusty@rustcorp.com.au
Signed-off-by: Ingo Molnar
---
 kernel/sched/topology.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 179b90b60ec6..727daa2a0abe 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1851,7 +1851,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 	/* Let the architecture update CPU core mappings: */
 	new_topology = arch_update_cpu_topology();
 
-	n = doms_new ? ndoms_new : 0;
+	if (!doms_new) {
+		WARN_ON_ONCE(dattr_new);
+		n = 0;
+		doms_new = alloc_sched_domains(1);
+		if (doms_new) {
+			n = 1;
+			cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
+		}
+	} else {
+		n = ndoms_new;
+	}
 
 	/* Destroy deleted domains: */
 	for (i = 0; i < ndoms_cur; i++) {
@@ -1867,11 +1877,10 @@ match1:
 	}
 
 	n = ndoms_cur;
-	if (doms_new == NULL) {
+	if (!doms_new) {
 		n = 0;
 		doms_new = &fallback_doms;
 		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
-		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains: */
-- cgit

From bbdacdfed2f5fa50a2cc9f500a36e05990a0837d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 10 Aug 2017 17:10:26 +0200
Subject: sched/debug: Optimize sched_domain sysctl generation

Currently we unconditionally destroy all sysctl bits and regenerate them
after we've rebuilt the domains (even if that rebuild is a no-op).

And since we unconditionally (re)build the sysctl for all possible CPUs,
onlining all CPUs gets us O(n^2) time.

Instead change this to only rebuild the bits for CPUs we've actually
installed new domains on.
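
The change boils down to a "dirty set": remember which CPUs had a new domain
hierarchy attached and regenerate only their sysctl entries on the next pass.
A rough, self-contained sketch of that pattern follows; the names are made up
for illustration and this is not the kernel's sysctl code, which keeps the set
in a cpumask (sd_sysctl_cpus in the patch below).

  #include <stdbool.h>
  #include <stdio.h>

  #define NR_CPUS 8

  static bool dirty[NR_CPUS];     /* stands in for the sd_sysctl_cpus mask */

  /* Called whenever a new domain hierarchy is attached to @cpu. */
  static void mark_dirty(int cpu)
  {
          dirty[cpu] = true;
  }

  static void regenerate_entry(int cpu)
  {
          printf("rebuilding sysctl entries for cpu%d\n", cpu);
  }

  /* Called once per rebuild: only dirty CPUs pay the regeneration cost. */
  static void refresh(void)
  {
          for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                  if (!dirty[cpu])
                          continue;
                  regenerate_entry(cpu);
                  dirty[cpu] = false;
          }
  }

Repeatedly onlining CPUs then redoes the expensive per-CPU generation only for
the CPUs that actually changed, instead of regenerating entries for every
possible CPU on each rebuild.
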
Reported-by: Ofer Levi(SW)
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 kernel/sched/debug.c    | 68 +++++++++++++++++++++++++++++++++++++++----------
 kernel/sched/sched.h    |  4 +++
 kernel/sched/topology.c |  1 +
 3 files changed, 59 insertions(+), 14 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index cfd84f79e075..4a23bbc3111b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -327,38 +327,78 @@ static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 	return table;
 }
 
+static cpumask_var_t sd_sysctl_cpus;
 static struct ctl_table_header *sd_sysctl_header;
+
 void register_sched_domain_sysctl(void)
 {
-	int i, cpu_num = num_possible_cpus();
-	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+	static struct ctl_table *cpu_entries;
+	static struct ctl_table **cpu_idx;
 	char buf[32];
+	int i;
 
-	WARN_ON(sd_ctl_dir[0].child);
-	sd_ctl_dir[0].child = entry;
+	if (!cpu_entries) {
+		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
+		if (!cpu_entries)
+			return;
 
-	if (entry == NULL)
-		return;
+		WARN_ON(sd_ctl_dir[0].child);
+		sd_ctl_dir[0].child = cpu_entries;
+	}
 
-	for_each_possible_cpu(i) {
-		snprintf(buf, 32, "cpu%d", i);
-		entry->procname = kstrdup(buf, GFP_KERNEL);
-		entry->mode = 0555;
-		entry->child = sd_alloc_ctl_cpu_table(i);
-		entry++;
+	if (!cpu_idx) {
+		struct ctl_table *e = cpu_entries;
+
+		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
+		if (!cpu_idx)
+			return;
+
+		/* deal with sparse possible map */
+		for_each_possible_cpu(i) {
+			cpu_idx[i] = e;
+			e++;
+		}
+	}
+
+	if (!cpumask_available(sd_sysctl_cpus)) {
+		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
+			return;
+
+		/* init to possible to not have holes in @cpu_entries */
+		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
+	}
+
+	for_each_cpu(i, sd_sysctl_cpus) {
+		struct ctl_table *e = cpu_idx[i];
+
+		if (e->child)
+			sd_free_ctl_entry(&e->child);
+
+		if (!e->procname) {
+			snprintf(buf, 32, "cpu%d", i);
+			e->procname = kstrdup(buf, GFP_KERNEL);
+		}
+		e->mode = 0555;
+		e->child = sd_alloc_ctl_cpu_table(i);
+
+		__cpumask_clear_cpu(i, sd_sysctl_cpus);
 	}
 
 	WARN_ON(sd_sysctl_header);
 	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
 }
 
+void dirty_sched_domain_sysctl(int cpu)
+{
+	if (cpumask_available(sd_sysctl_cpus))
+		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
+}
+
 /* may be called multiple times per register */
 void unregister_sched_domain_sysctl(void)
 {
 	unregister_sysctl_table(sd_sysctl_header);
 	sd_sysctl_header = NULL;
-	if (sd_ctl_dir[0].child)
-		sd_free_ctl_entry(&sd_ctl_dir[0].child);
 }
 #endif /* CONFIG_SYSCTL */
 #endif /* CONFIG_SMP */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index eeef1a3086d1..25e5cb1107f3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1120,11 +1120,15 @@ extern int group_balance_cpu(struct sched_group *sg);
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
 void register_sched_domain_sysctl(void);
+void dirty_sched_domain_sysctl(int cpu);
 void unregister_sched_domain_sysctl(void);
 #else
 static inline void register_sched_domain_sysctl(void)
 {
 }
+static inline void dirty_sched_domain_sysctl(int cpu)
+{
+}
 static inline void unregister_sched_domain_sysctl(void)
 {
 }
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 727daa2a0abe..6f7b43982f73 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -459,6 +459,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	rq_attach_root(rq, rd);
 	tmp = rq->sd;
 	rcu_assign_pointer(rq->sd, sd);
+	dirty_sched_domain_sysctl(cpu);
 	destroy_sched_domains(tmp);
 
 	update_top_cache_domain(cpu);
-- cgit
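
Finally, the completion-documentation patch earlier in this series is easiest
to internalize with a short usage sketch. This is a hedged illustration of the
documented rule, not code from any of the patches above; it uses the completion
API as I understand it (init_completion(), complete_all(), wait_for_completion()
and reinit_completion()), with all error handling and locking omitted.

  #include <linux/completion.h>

  static struct completion setup_done;

  static void setup_thread(void)
  {
          init_completion(&setup_done);
          /* ... perform one-time setup ... */
          complete_all(&setup_done);      /* wake every waiter; done stays "set" */
  }

  static void worker_thread(void)
  {
          wait_for_completion(&setup_done);
          /* ... proceed, setup is guaranteed to have finished ... */
  }

  static void restart_setup(void)
  {
          /*
           * Only safe once all waiters are known to have returned from
           * wait_for_completion(): complete_all() left the completion
           * permanently "done", so it must be re-armed before reuse.
           */
          reinit_completion(&setup_done);
  }
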