author    | Linus Torvalds <torvalds@linux-foundation.org> | 2024-07-16 17:00:50 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-07-16 17:00:50 -0700
commit    | 4a996d90b9e046c6d59845acf00a54d464c34ff3 (patch)
tree      | 52fa410c742aaec6b469a3907e71eefc75e999b3 /kernel/sched/psi.c
parent    | 0c182ac2ebc5470a725632b08cee9a52065bbe71 (diff)
parent    | db43a609d01e8bf9b812d45dc2945c65b57dd793 (diff)
Merge tag 'sched-core-2024-07-16' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
- Update Daniel Bristot de Oliveira's entry in MAINTAINERS,
and credit him in CREDITS
- Harmonize the lock-yielding behavior on dynamically selected
preemption models with static ones
- Reorganize the code a bit: split out sched/syscalls.c to reduce
the size of sched/core.c
- Micro-optimize psi_group_change()
- Fix set_load_weight() for SCHED_IDLE tasks
- Misc cleanups & fixes
* tag 'sched-core-2024-07-16' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Update MAINTAINERS and CREDITS
sched/fair: set_load_weight() must also call reweight_task() for SCHED_IDLE tasks
sched/psi: Optimise psi_group_change a bit
sched/core: Drop spinlocks on contention iff kernel is preemptible
sched/core: Move preempt_model_*() helpers from sched.h to preempt.h
sched/balance: Skip unnecessary updates to idle load balancer's flags
idle: Remove stale RCU comment
sched/headers: Move struct pre-declarations to the beginning of the header
sched/core: Clean up kernel/sched/sched.h a bit
sched/core: Simplify prefetch_curr_exec_start()
sched: Fix spelling in comments
sched/syscalls: Split out kernel/sched/syscalls.c from kernel/sched/core.c
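As background for the "Harmonize the lock-yielding behavior on dynamically selected preemption models" item above: yielding a contended lock only helps when the kernel is actually allowed to preempt the lock holder, so the decision should follow the preemption model selected at runtime rather than only the compile-time config. The sketch below illustrates that shape using the existing preempt_model_preemptible() and spin_is_contended() helpers; the wrapper name is made up for illustration and this is not the exact upstream hunk.

```c
#include <linux/preempt.h>
#include <linux/spinlock.h>

/*
 * Illustrative sketch only (hypothetical helper name): report that a
 * held spinlock should be dropped on contention only when the selected
 * preemption model would actually let a waiter run if we yielded.
 */
static inline bool lock_should_break(spinlock_t *lock)
{
	/*
	 * PREEMPT_NONE/VOLUNTARY (whether built in statically or chosen
	 * at boot on a PREEMPT_DYNAMIC kernel): dropping the lock buys
	 * nothing, the waiter keeps spinning until we finish anyway.
	 */
	if (!preempt_model_preemptible())
		return false;

	/* Fully preemptible kernel: yield the lock if someone is waiting. */
	return spin_is_contended(lock);
}
```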
Diffstat (limited to 'kernel/sched/psi.c')
-rw-r--r-- | kernel/sched/psi.c | 60
1 file changed, 30 insertions, 30 deletions
```diff
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 507d7b8d79af..020d58967d4e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -41,7 +41,7 @@
  * What it means for a task to be productive is defined differently
  * for each resource. For IO, productive means a running task. For
  * memory, productive means a running task that isn't a reclaimer. For
- * CPU, productive means an oncpu task.
+ * CPU, productive means an on-CPU task.
  *
  * Naturally, the FULL state doesn't exist for the CPU resource at the
  * system level, but exist at the cgroup level. At the cgroup level,
@@ -49,7 +49,7 @@
  * resource which is being used by others outside of the cgroup or
  * throttled by the cgroup cpu.max configuration.
  *
- * The percentage of wallclock time spent in those compound stall
+ * The percentage of wall clock time spent in those compound stall
  * states gives pressure numbers between 0 and 100 for each resource,
  * where the SOME percentage indicates workload slowdowns and the FULL
  * percentage indicates reduced CPU utilization:
@@ -218,28 +218,32 @@ void __init psi_init(void)
 	group_init(&psi_system);
 }
 
-static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
+static u32 test_states(unsigned int *tasks, u32 state_mask)
 {
-	switch (state) {
-	case PSI_IO_SOME:
-		return unlikely(tasks[NR_IOWAIT]);
-	case PSI_IO_FULL:
-		return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
-	case PSI_MEM_SOME:
-		return unlikely(tasks[NR_MEMSTALL]);
-	case PSI_MEM_FULL:
-		return unlikely(tasks[NR_MEMSTALL] &&
-			tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
-	case PSI_CPU_SOME:
-		return unlikely(tasks[NR_RUNNING] > oncpu);
-	case PSI_CPU_FULL:
-		return unlikely(tasks[NR_RUNNING] && !oncpu);
-	case PSI_NONIDLE:
-		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
-			tasks[NR_RUNNING];
-	default:
-		return false;
+	const bool oncpu = state_mask & PSI_ONCPU;
+
+	if (tasks[NR_IOWAIT]) {
+		state_mask |= BIT(PSI_IO_SOME);
+		if (!tasks[NR_RUNNING])
+			state_mask |= BIT(PSI_IO_FULL);
+	}
+
+	if (tasks[NR_MEMSTALL]) {
+		state_mask |= BIT(PSI_MEM_SOME);
+		if (tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING])
+			state_mask |= BIT(PSI_MEM_FULL);
 	}
+
+	if (tasks[NR_RUNNING] > oncpu)
+		state_mask |= BIT(PSI_CPU_SOME);
+
+	if (tasks[NR_RUNNING] && !oncpu)
+		state_mask |= BIT(PSI_CPU_FULL);
+
+	if (tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || tasks[NR_RUNNING])
+		state_mask |= BIT(PSI_NONIDLE);
+
+	return state_mask;
 }
 
 static void get_recent_times(struct psi_group *group, int cpu,
@@ -345,7 +349,7 @@ static void collect_percpu_times(struct psi_group *group,
 
 	/*
 	 * Collect the per-cpu time buckets and average them into a
-	 * single time sample that is normalized to wallclock time.
+	 * single time sample that is normalized to wall clock time.
 	 *
 	 * For averaging, each CPU is weighted by its non-idle time in
 	 * the sampling period. This eliminates artifacts from uneven
@@ -770,7 +774,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
 {
 	struct psi_group_cpu *groupc;
 	unsigned int t, m;
-	enum psi_states s;
 	u32 state_mask;
 
 	lockdep_assert_rq_held(cpu_rq(cpu));
@@ -842,10 +845,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
 		return;
 	}
 
-	for (s = 0; s < NR_PSI_STATES; s++) {
-		if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
-			state_mask |= (1 << s);
-	}
+	state_mask = test_states(groupc->tasks, state_mask);
 
 	/*
 	 * Since we care about lost potential, a memstall is FULL
@@ -1205,7 +1205,7 @@ void psi_cgroup_restart(struct psi_group *group)
 	/*
 	 * After we disable psi_group->enabled, we don't actually
 	 * stop percpu tasks accounting in each psi_group_cpu,
-	 * instead only stop test_state() loop, record_times()
+	 * instead only stop test_states() loop, record_times()
 	 * and averaging worker, see psi_group_change() for details.
 	 *
 	 * When disable cgroup PSI, this function has nothing to sync
@@ -1213,7 +1213,7 @@ void psi_cgroup_restart(struct psi_group *group)
 	 * would see !psi_group->enabled and only do task accounting.
 	 *
 	 * When re-enable cgroup PSI, this function use psi_group_change()
-	 * to get correct state mask from test_state() loop on tasks[],
+	 * to get correct state mask from test_states() loop on tasks[],
 	 * and restart groupc->state_start from now, use .clear = .set = 0
 	 * here since no task status really changed.
 	 */
```
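The comment block touched above is the one describing how the SOME and FULL stall states become pressure percentages of wall clock time. Those percentages are what the standard PSI interface exports under /proc/pressure/. As a side note, a minimal userspace reader of those system-wide files could look like the sketch below; it is not part of this patch, just an illustration of the exported format (lines such as "some avg10=0.00 avg60=0.00 avg300=0.00 total=0").

```c
#include <stdio.h>

/* Dump one system-wide PSI file, e.g. /proc/pressure/memory. */
static void dump_pressure(const char *res)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/pressure/%s", res);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	/*
	 * Each line reports "some" or "full" with avg10/avg60/avg300
	 * percentages and a cumulative stall time in microseconds.
	 */
	while (fgets(line, sizeof(line), f))
		printf("%-6s %s", res, line);
	fclose(f);
}

int main(void)
{
	dump_pressure("cpu");
	dump_pressure("memory");
	dump_pressure("io");
	return 0;
}
```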