Diffstat (limited to 'kernel/sched/debug.c')
 kernel/sched/debug.c | 107
 1 file changed, 45 insertions(+), 62 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index a48b2a701ec2..41caa22e0680 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -6,6 +6,9 @@
*
* Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
*/
+#include <linux/debugfs.h>
+#include <linux/nmi.h>
+#include "sched.h"
/*
* This allows printing both to /sys/kernel/debug/sched/debug and
@@ -90,10 +93,10 @@ static void sched_feat_enable(int i)
{
static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
-#else
+#else /* !CONFIG_JUMP_LABEL: */
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
-#endif /* CONFIG_JUMP_LABEL */
+#endif /* !CONFIG_JUMP_LABEL */
static int sched_feat_set(char *cmp)
{
@@ -166,8 +169,6 @@ static const struct file_operations sched_feat_fops = {
.release = single_release,
};
-#ifdef CONFIG_SMP
-
static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -214,8 +215,6 @@ static const struct file_operations sched_scaling_fops = {
.release = single_release,
};
-#endif /* SMP */
-
#ifdef CONFIG_PREEMPT_DYNAMIC
static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
@@ -244,11 +243,13 @@ static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- static const char * preempt_modes[] = {
- "none", "voluntary", "full", "lazy",
- };
- int j = ARRAY_SIZE(preempt_modes) - !IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY);
int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int j;
+
+ /* Count entries in NULL terminated preempt_modes */
+ for (j = 0; preempt_modes[j]; j++)
+ ;
+ j -= !IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY);
for (; i < j; i++) {
if (preempt_dynamic_mode == i)
@@ -281,7 +282,6 @@ static const struct file_operations sched_dynamic_fops = {
__read_mostly bool sched_debug_verbose;
-#ifdef CONFIG_SMP
static struct dentry *sd_dentry;
@@ -292,7 +292,7 @@ static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
bool orig;
cpus_read_lock();
- mutex_lock(&sched_domains_mutex);
+ sched_domains_mutex_lock();
orig = sched_debug_verbose;
result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);
@@ -304,14 +304,11 @@ static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
sd_dentry = NULL;
}
- mutex_unlock(&sched_domains_mutex);
+ sched_domains_mutex_unlock();
cpus_read_unlock();
return result;
}
-#else
-#define sched_verbose_write debugfs_write_file_bool
-#endif
static const struct file_operations sched_verbose_fops = {
.read = debugfs_read_file_bool,
@@ -379,10 +376,8 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu
return -EINVAL;
}
- if (rq->cfs.h_nr_running) {
- update_rq_clock(rq);
- dl_server_stop(&rq->fair_server);
- }
+ update_rq_clock(rq);
+ dl_server_stop(&rq->fair_server);
retval = dl_server_apply_params(&rq->fair_server, runtime, period, 0);
if (retval)
@@ -392,7 +387,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu
printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n",
cpu_of(rq));
- if (rq->cfs.h_nr_running)
+ if (rq->cfs.h_nr_queued)
dl_server_start(&rq->fair_server);
}
@@ -510,15 +505,13 @@ static __init int sched_init_debug(void)
debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
-#ifdef CONFIG_SMP
debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
- mutex_lock(&sched_domains_mutex);
+ sched_domains_mutex_lock();
update_sched_domain_debugfs();
- mutex_unlock(&sched_domains_mutex);
-#endif
+ sched_domains_mutex_unlock();
#ifdef CONFIG_NUMA_BALANCING
numa = debugfs_create_dir("numa_balancing", debugfs_sched);
@@ -528,7 +521,7 @@ static __init int sched_init_debug(void)
debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
-#endif
+#endif /* CONFIG_NUMA_BALANCING */
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
@@ -538,8 +531,6 @@ static __init int sched_init_debug(void)
}
late_initcall(sched_init_debug);
-#ifdef CONFIG_SMP
-
static cpumask_var_t sd_sysctl_cpus;
static int sd_flags_show(struct seq_file *m, void *v)
@@ -586,6 +577,10 @@ static void register_sd(struct sched_domain *sd, struct dentry *parent)
debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
debugfs_create_u32("level", 0444, parent, (u32 *)&sd->level);
+
+ if (sd->flags & SD_ASYM_PACKING)
+ debugfs_create_u32("group_asym_prefer_cpu", 0444, parent,
+ (u32 *)&sd->groups->asym_prefer_cpu);
}
void update_sched_domain_debugfs(void)
@@ -646,8 +641,6 @@ void dirty_sched_domain_sysctl(int cpu)
__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}
-#endif /* CONFIG_SMP */
-
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
@@ -684,18 +677,16 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
}
P(se->load.weight);
-#ifdef CONFIG_SMP
P(se->avg.load_avg);
P(se->avg.util_avg);
P(se->avg.runnable_avg);
-#endif
#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
@@ -805,7 +796,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
- s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, left_deadline = -1, spread;
+ s64 left_vruntime = -1, zero_vruntime, right_vruntime = -1, left_deadline = -1, spread;
struct sched_entity *last, *first, *root;
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
@@ -828,29 +819,26 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
last = __pick_last_entity(cfs_rq);
if (last)
right_vruntime = last->vruntime;
- min_vruntime = cfs_rq->min_vruntime;
+ zero_vruntime = cfs_rq->zero_vruntime;
raw_spin_rq_unlock_irqrestore(rq, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_deadline",
SPLIT_NS(left_deadline));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_vruntime",
SPLIT_NS(left_vruntime));
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
- SPLIT_NS(min_vruntime));
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "zero_vruntime",
+ SPLIT_NS(zero_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime",
SPLIT_NS(avg_vruntime(cfs_rq)));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime",
SPLIT_NS(right_vruntime));
spread = right_vruntime - left_vruntime;
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
- SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
- SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
- SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
- cfs_rq->idle_nr_running);
- SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running",
- cfs_rq->idle_h_nr_running);
+ SEQ_printf(m, " .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued);
+ SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
+ SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
+ SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
-#ifdef CONFIG_SMP
SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
cfs_rq->avg.load_avg);
SEQ_printf(m, " .%-30s: %lu\n", "runnable_avg",
@@ -870,8 +858,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
cfs_rq->tg_load_avg_contrib);
SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
atomic_long_read(&cfs_rq->tg->load_avg));
-#endif
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_CFS_BANDWIDTH
SEQ_printf(m, " .%-30s: %d\n", "throttled",
cfs_rq->throttled);
@@ -925,11 +912,7 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
PU(dl_nr_running);
-#ifdef CONFIG_SMP
dl_bw = &cpu_rq(cpu)->rd->dl_bw;
-#else
- dl_bw = &dl_rq->dl_bw;
-#endif
SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
@@ -947,9 +930,9 @@ static void print_cpu(struct seq_file *m, int cpu)
SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
cpu, freq / 1000, (freq % 1000));
}
-#else
+#else /* !CONFIG_X86: */
SEQ_printf(m, "cpu#%d\n", cpu);
-#endif
+#endif /* !CONFIG_X86 */
#define P(x) \
do { \
@@ -972,12 +955,10 @@ do { \
#undef P
#undef PN
-#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
P64(avg_idle);
P64(max_idle_balance_cost);
#undef P64
-#endif
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
if (schedstat_enabled()) {
@@ -1159,7 +1140,7 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
task_node(p), task_numa_group_id(p));
show_numa_stats(p, m);
-#endif
+#endif /* CONFIG_NUMA_BALANCING */
}
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
@@ -1243,7 +1224,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
__PS("nr_involuntary_switches", p->nivcsw);
P(se.load.weight);
-#ifdef CONFIG_SMP
P(se.avg.load_sum);
P(se.avg.runnable_sum);
P(se.avg.util_sum);
@@ -1252,18 +1232,19 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.avg.util_avg);
P(se.avg.last_update_time);
PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
-#endif
#ifdef CONFIG_UCLAMP_TASK
__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
-#endif
+#endif /* CONFIG_UCLAMP_TASK */
P(policy);
P(prio);
if (task_has_dl_policy(p)) {
P(dl.runtime);
P(dl.deadline);
+ } else if (fair_policy(p->policy)) {
+ P(se.slice);
}
#ifdef CONFIG_SCHED_CLASS_EXT
__PS("ext.enabled", task_on_scx(p));
@@ -1294,8 +1275,10 @@ void resched_latency_warn(int cpu, u64 latency)
{
static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);
- WARN(__ratelimit(&latency_check_ratelimit),
- "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
- "without schedule\n",
- cpu, latency, cpu_rq(cpu)->ticks_without_resched);
+ if (likely(!__ratelimit(&latency_check_ratelimit)))
+ return;
+
+ pr_err("sched: CPU %d need_resched set for > %llu ns (%d ticks) without schedule\n",
+ cpu, latency, cpu_rq(cpu)->ticks_without_resched);
+ dump_stack();
}
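
For reference, a minimal user-space sketch of the counting idiom the sched_dynamic_show() hunk switches to: walking a NULL-terminated table instead of taking ARRAY_SIZE() of a local array. The table contents below mirror the removed local array and are illustrative only, not the kernel's actual global preempt_modes definition.

#include <stdio.h>

/* Illustrative stand-in for a NULL-terminated preempt_modes table. */
static const char *preempt_modes[] = {
	"none", "voluntary", "full", "lazy",
	NULL,
};

int main(void)
{
	int j;

	/* Count entries in the NULL-terminated table, as the new hunk does. */
	for (j = 0; preempt_modes[j]; j++)
		;

	printf("preempt_modes has %d entries\n", j);
	return 0;
}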