Diffstat (limited to 'kernel/sched/features.h')
| -rw-r--r-- | kernel/sched/features.h | 120 |
1 file changed, 88 insertions, 32 deletions
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 99399f8e4799..980d92bab8ab 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -1,72 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
 /*
- * Only give sleepers 50% of their service deficit. This allows
- * them to run sooner, but does not allow tons of sleepers to
- * rip the spread apart.
+ * Using the avg_vruntime, do the right thing and preserve lag across
+ * sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
  */
-SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
-
+SCHED_FEAT(PLACE_LAG, true)
+
+/*
+ * Give new tasks half a slice to ease into the competition.
+ */
+SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
+
+/*
+ * Preserve relative virtual deadline on 'migration'.
+ */
+SCHED_FEAT(PLACE_REL_DEADLINE, true)
+
+/*
+ * Inhibit (wakeup) preemption until the current task has either matched the
+ * 0-lag point or until is has exhausted it's slice.
+ */
+SCHED_FEAT(RUN_TO_PARITY, true)
+
 /*
- * Place new tasks ahead so that they do not starve already running
- * tasks
+ * Allow wakeup of tasks with a shorter slice to cancel RUN_TO_PARITY for
+ * current.
  */
-SCHED_FEAT(START_DEBIT, true)
+SCHED_FEAT(PREEMPT_SHORT, true)

 /*
  * Prefer to schedule the task we woke last (assuming it failed
  * wakeup-preemption), since its likely going to consume data we
  * touched, increases cache locality.
  */
-SCHED_FEAT(NEXT_BUDDY, false)
+SCHED_FEAT(NEXT_BUDDY, true)

 /*
- * Prefer to schedule the task that ran last (when we did
- * wake-preempt) as that likely will touch the same data, increases
- * cache locality.
+ * Allow completely ignoring cfs_rq->next; which can be set from various
+ * places:
+ * - NEXT_BUDDY (wakeup preemption)
+ * - yield_to_task()
+ * - cgroup dequeue / pick
  */
-SCHED_FEAT(LAST_BUDDY, true)
+SCHED_FEAT(PICK_BUDDY, true)

 /*
- * Consider buddies to be cache hot, decreases the likelyness of a
+ * Consider buddies to be cache hot, decreases the likeliness of a
  * cache buddy being migrated away, increases cache locality.
  */
 SCHED_FEAT(CACHE_HOT_BUDDY, true)

 /*
- * Allow wakeup-time preemption of the current task:
+ * Delay dequeueing tasks until they get selected or woken.
+ *
+ * By delaying the dequeue for non-eligible tasks, they remain in the
+ * competition and can burn off their negative lag. When they get selected
+ * they'll have positive lag by definition.
+ *
+ * DELAY_ZERO clips the lag on dequeue (or wakeup) to 0.
  */
-SCHED_FEAT(WAKEUP_PREEMPTION, true)
+SCHED_FEAT(DELAY_DEQUEUE, true)
+SCHED_FEAT(DELAY_ZERO, true)

 /*
- * Use arch dependent cpu power functions
+ * Allow wakeup-time preemption of the current task:
  */
-SCHED_FEAT(ARCH_POWER, true)
+SCHED_FEAT(WAKEUP_PREEMPTION, true)

 SCHED_FEAT(HRTICK, false)
-SCHED_FEAT(DOUBLE_TICK, false)
-SCHED_FEAT(LB_BIAS, true)
+SCHED_FEAT(HRTICK_DL, false)

 /*
- * Decrement CPU power based on time not spent running tasks
+ * Decrement CPU capacity based on time not spent running tasks
  */
-SCHED_FEAT(NONTASK_POWER, true)
+SCHED_FEAT(NONTASK_CAPACITY, true)
+
+#ifdef CONFIG_PREEMPT_RT
+SCHED_FEAT(TTWU_QUEUE, false)
+#else

 /*
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
 SCHED_FEAT(TTWU_QUEUE, true)
+#endif

-SCHED_FEAT(FORCE_SD_OVERLAP, false)
-SCHED_FEAT(RT_RUNTIME_SHARE, true)
-SCHED_FEAT(LB_MIN, false)
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_UTIL, true)
+
+/*
+ * Issue a WARN when we do multiple update_rq_clock() calls
+ * in a single rq->lock section. Default disabled because the
+ * annotations are not complete.
+ */
+SCHED_FEAT(WARN_DOUBLE_CLOCK, false)
+
+#ifdef HAVE_RT_PUSH_IPI
 /*
- * Apply the automatic NUMA scheduling policy. Enabled automatically
- * at runtime if running on a NUMA machine. Can be controlled via
- * numa_balancing=. Allow PTE scanning to be forced on UMA machines
- * for debugging the core machinery.
+ * In order to avoid a thundering herd attack of CPUs that are
+ * lowering their priorities at the same time, and there being
+ * a single CPU that has an RT task that can migrate and is waiting
+ * to run, where the other CPUs will try to take that CPUs
+ * rq lock and possibly create a large contention, sending an
+ * IPI to that CPU and let that CPU push the RT task to where
+ * it should go may be a better scenario.
  */
-#ifdef CONFIG_NUMA_BALANCING
-SCHED_FEAT(NUMA, false)
-SCHED_FEAT(NUMA_FORCE, false)
+SCHED_FEAT(RT_PUSH_IPI, true)
 #endif
+
+SCHED_FEAT(RT_RUNTIME_SHARE, false)
+SCHED_FEAT(LB_MIN, false)
+SCHED_FEAT(ATTACH_AGE_LOAD, true)
+
+SCHED_FEAT(WA_IDLE, true)
+SCHED_FEAT(WA_WEIGHT, true)
+SCHED_FEAT(WA_BIAS, true)
+
+/*
+ * UtilEstimation. Use estimated CPU utilization.
+ */
+SCHED_FEAT(UTIL_EST, true)
+
+SCHED_FEAT(LATENCY_WARN, false)
+
+/*
+ * Do newidle balancing proportional to its success rate using randomization.
+ */
+SCHED_FEAT(NI_RANDOM, true)
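The header itself is pure data: every entry is a SCHED_FEAT(name, default) line, and the scheduler builds its feature bits by expanding that list more than once under different per-entry macros (in the kernel this is done by re-including features.h from kernel/sched/sched.h, with a sched_feat()-style test checking the resulting bit). Below is a minimal, self-contained sketch of that X-macro pattern using a few names from this diff; the identifiers SCHED_FEATURES, FEAT_ENUM, FEAT_MASK, sysctl_features and feat_enabled are invented for illustration, and the kernel's real consumer is more elaborate (static keys, per-feature constants, a debugfs interface).

```c
#include <stdio.h>

/* Stand-in for the SCHED_FEAT() list in features.h, using names from the diff. */
#define SCHED_FEATURES(X)		\
	X(PLACE_LAG, 1)			\
	X(RUN_TO_PARITY, 1)		\
	X(DELAY_DEQUEUE, 1)		\
	X(LATENCY_WARN, 0)

/* First expansion: one enum constant per feature, i.e. a bit index. */
#define FEAT_ENUM(name, enabled)	__FEAT_##name,
enum { SCHED_FEATURES(FEAT_ENUM) __FEAT_NR };

/* Second expansion: fold the per-feature defaults into one bitmask. */
#define FEAT_MASK(name, enabled)	((unsigned long)(enabled) << __FEAT_##name) |
static unsigned long sysctl_features = SCHED_FEATURES(FEAT_MASK) 0UL;

/* Analogue of the kernel's sched_feat(x) test: check the feature's bit. */
#define feat_enabled(x)	(!!(sysctl_features & (1UL << __FEAT_##x)))

int main(void)
{
	printf("PLACE_LAG=%d LATENCY_WARN=%d\n",
	       feat_enabled(PLACE_LAG), feat_enabled(LATENCY_WARN));

	/* Runtime toggling, conceptually what the debugfs knob does. */
	sysctl_features &= ~(1UL << __FEAT_PLACE_LAG);
	printf("PLACE_LAG=%d after clearing its bit\n", feat_enabled(PLACE_LAG));
	return 0;
}
```

On a live system with scheduler debugging enabled the same defaults can usually be inspected and flipped through debugfs: recent kernels expose /sys/kernel/debug/sched/features (older ones /sys/kernel/debug/sched_features), where writing a feature name enables it and writing NO_<NAME> disables it.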
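Several of the new comments (PLACE_LAG, RUN_TO_PARITY, DELAY_DEQUEUE/DELAY_ZERO) are phrased in terms of "lag": how far an entity's virtual runtime trails or leads the queue's average vruntime, with positive lag meaning the task is still owed service. The toy functions below sketch only that bookkeeping idea under EEVDF; the struct and names (entity, record_vlag, place_on_wakeup) are invented for illustration, and the kernel's actual update_entity_lag()/place_entity() additionally clamp the lag and adjust it for the weight the waking entity adds back to the queue.

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct entity {
	int64_t vruntime;	/* virtual time consumed so far */
	int64_t vlag;		/* lag remembered at dequeue: positive = owed service */
};

/* At dequeue (sleep), remember how far behind/ahead of the average we are. */
static void record_vlag(struct entity *se, int64_t avg_vruntime)
{
	se->vlag = avg_vruntime - se->vruntime;
}

/*
 * At wakeup, re-place the entity relative to the current average.
 * preserve_lag models the PLACE_LAG idea (EEVDF placement strategy #1):
 * the old lag is reproduced, so a task that slept while "in debt"
 * (negative lag) wakes up still in debt; without it the task joins at
 * the average, i.e. with zero lag (strategy #2).
 */
static void place_on_wakeup(struct entity *se, int64_t avg_vruntime, bool preserve_lag)
{
	int64_t lag = preserve_lag ? se->vlag : 0;

	se->vruntime = avg_vruntime - lag;
}

int main(void)
{
	struct entity se = { .vruntime = 1200, .vlag = 0 };

	record_vlag(&se, 1000);			/* slept 200 ahead of the average: lag = -200 */
	place_on_wakeup(&se, 5000, true);	/* wake much later, lag reproduced */
	printf("woken vruntime=%lld (lag now %lld)\n",
	       (long long)se.vruntime, (long long)(5000 - se.vruntime));
	return 0;
}
```

With preserve_lag the entity's relative position survives a sleep+wake cycle, which is what the PLACE_LAG comment means by preserving lag; clipping or zeroing that saved lag, as the DELAY_ZERO comment describes, instead forgives whatever debt or credit remained.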
