path: root/kernel/sched/debug.c
author    Ingo Molnar <mingo@kernel.org>  2025-05-28 10:09:11 +0200
committer Ingo Molnar <mingo@kernel.org>  2025-06-13 08:47:20 +0200
commit    02fb885ebdc4ad029aabff4b85dcbc540d7cdb32 (patch)
tree      b13bdbc7694c4366e6ae80e3f27fad492c31ce4c /kernel/sched/debug.c
parent    6324dce8f6262ec2049494af311e5418bc733341 (diff)
sched/smp: Use the SMP version of scheduler debugging data
Simplify the scheduler by making CONFIG_SMP=y debug output unconditional.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250528080924.2273858-31-mingo@kernel.org
Diffstat (limited to 'kernel/sched/debug.c')
-rw-r--r--  kernel/sched/debug.c  14
1 file changed, 0 insertions, 14 deletions
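For orientation, here is a minimal sketch of how the debugfs setup in sched_init_debug() reads once this patch is applied: the formerly SMP-only entries are registered unconditionally. It is a fragment reconstructed from the hunks below, not verbatim kernel source; the creation of the "sched" debugfs directory and the local dentry variable are illustrative assumptions.

/* Sketch (not verbatim source): post-patch shape of sched_init_debug(). */
static __init int sched_init_debug(void)
{
	/* Assumption for illustration: create the "sched" debugfs directory here. */
	struct dentry *debugfs_sched = debugfs_create_dir("sched", NULL);

	/* These were guarded by #ifdef CONFIG_SMP before; now always created. */
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	/* The sched-domain debugfs dump is likewise set up unconditionally now. */
	sched_domains_mutex_lock();
	update_sched_domain_debugfs();
	sched_domains_mutex_unlock();

	return 0;
}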
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 04c0354f05e4..f16d53928bb9 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -169,8 +169,6 @@ static const struct file_operations sched_feat_fops = {
.release = single_release,
};
-#ifdef CONFIG_SMP
-
static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -217,8 +215,6 @@ static const struct file_operations sched_scaling_fops = {
.release = single_release,
};
-#endif /* CONFIG_SMP */
-
#ifdef CONFIG_PREEMPT_DYNAMIC
static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
@@ -511,7 +507,6 @@ static __init int sched_init_debug(void)
debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
-#ifdef CONFIG_SMP
debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
@@ -519,7 +514,6 @@ static __init int sched_init_debug(void)
sched_domains_mutex_lock();
update_sched_domain_debugfs();
sched_domains_mutex_unlock();
-#endif /* CONFIG_SMP */
#ifdef CONFIG_NUMA_BALANCING
numa = debugfs_create_dir("numa_balancing", debugfs_sched);
@@ -685,11 +679,9 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
}
P(se->load.weight);
-#ifdef CONFIG_SMP
P(se->avg.load_avg);
P(se->avg.util_avg);
P(se->avg.runnable_avg);
-#endif /* CONFIG_SMP */
#undef PN_SCHEDSTAT
#undef PN
@@ -849,7 +841,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
-#ifdef CONFIG_SMP
SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
cfs_rq->avg.load_avg);
SEQ_printf(m, " .%-30s: %lu\n", "runnable_avg",
@@ -870,7 +861,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
atomic_long_read(&cfs_rq->tg->load_avg));
#endif /* CONFIG_FAIR_GROUP_SCHED */
-#endif /* CONFIG_SMP */
#ifdef CONFIG_CFS_BANDWIDTH
SEQ_printf(m, " .%-30s: %d\n", "throttled",
cfs_rq->throttled);
@@ -967,12 +957,10 @@ do { \
#undef P
#undef PN
-#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
P64(avg_idle);
P64(max_idle_balance_cost);
#undef P64
-#endif /* CONFIG_SMP */
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
if (schedstat_enabled()) {
@@ -1242,7 +1230,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
__PS("nr_involuntary_switches", p->nivcsw);
P(se.load.weight);
-#ifdef CONFIG_SMP
P(se.avg.load_sum);
P(se.avg.runnable_sum);
P(se.avg.util_sum);
@@ -1251,7 +1238,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.avg.util_avg);
P(se.avg.last_update_time);
PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
-#endif /* CONFIG_SMP */
#ifdef CONFIG_UCLAMP_TASK
__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);