Diffstat (limited to 'kernel/sched/features.h')
-rw-r--r--  kernel/sched/features.h  74
1 file changed, 55 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 858589b83377..980d92bab8ab 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -1,62 +1,91 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+
 /*
- * Only give sleepers 50% of their service deficit. This allows
- * them to run sooner, but does not allow tons of sleepers to
- * rip the spread apart.
+ * Using the avg_vruntime, do the right thing and preserve lag across
+ * sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
  */
-SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
-
+SCHED_FEAT(PLACE_LAG, true)
 /*
- * Place new tasks ahead so that they do not starve already running
- * tasks
+ * Give new tasks half a slice to ease into the competition.
  */
-SCHED_FEAT(START_DEBIT, true)
+SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
+/*
+ * Preserve relative virtual deadline on 'migration'.
+ */
+SCHED_FEAT(PLACE_REL_DEADLINE, true)
+/*
+ * Inhibit (wakeup) preemption until the current task has either matched the
+ * 0-lag point or until is has exhausted it's slice.
+ */
+SCHED_FEAT(RUN_TO_PARITY, true)
+/*
+ * Allow wakeup of tasks with a shorter slice to cancel RUN_TO_PARITY for
+ * current.
+ */
+SCHED_FEAT(PREEMPT_SHORT, true)
 
 /*
  * Prefer to schedule the task we woke last (assuming it failed
  * wakeup-preemption), since its likely going to consume data we
  * touched, increases cache locality.
  */
-SCHED_FEAT(NEXT_BUDDY, false)
+SCHED_FEAT(NEXT_BUDDY, true)
 
 /*
- * Prefer to schedule the task that ran last (when we did
- * wake-preempt) as that likely will touch the same data, increases
- * cache locality.
+ * Allow completely ignoring cfs_rq->next; which can be set from various
+ * places:
+ *   - NEXT_BUDDY (wakeup preemption)
+ *   - yield_to_task()
+ *   - cgroup dequeue / pick
  */
-SCHED_FEAT(LAST_BUDDY, true)
+SCHED_FEAT(PICK_BUDDY, true)
 
 /*
- * Consider buddies to be cache hot, decreases the likelyness of a
+ * Consider buddies to be cache hot, decreases the likeliness of a
  * cache buddy being migrated away, increases cache locality.
  */
 SCHED_FEAT(CACHE_HOT_BUDDY, true)
 
 /*
+ * Delay dequeueing tasks until they get selected or woken.
+ *
+ * By delaying the dequeue for non-eligible tasks, they remain in the
+ * competition and can burn off their negative lag. When they get selected
+ * they'll have positive lag by definition.
+ *
+ * DELAY_ZERO clips the lag on dequeue (or wakeup) to 0.
+ */
+SCHED_FEAT(DELAY_DEQUEUE, true)
+SCHED_FEAT(DELAY_ZERO, true)
+
+/*
  * Allow wakeup-time preemption of the current task:
  */
 SCHED_FEAT(WAKEUP_PREEMPTION, true)
 
 SCHED_FEAT(HRTICK, false)
-SCHED_FEAT(DOUBLE_TICK, false)
-SCHED_FEAT(LB_BIAS, false)
+SCHED_FEAT(HRTICK_DL, false)
 
 /*
  * Decrement CPU capacity based on time not spent running tasks
  */
 SCHED_FEAT(NONTASK_CAPACITY, true)
 
+#ifdef CONFIG_PREEMPT_RT
+SCHED_FEAT(TTWU_QUEUE, false)
+#else
+
 /*
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
 SCHED_FEAT(TTWU_QUEUE, true)
+#endif
 
 /*
  * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
  */
-SCHED_FEAT(SIS_AVG_CPU, false)
-SCHED_FEAT(SIS_PROP, true)
+SCHED_FEAT(SIS_UTIL, true)
 
 /*
  * Issue a WARN when we do multiple update_rq_clock() calls
@@ -78,7 +107,7 @@ SCHED_FEAT(WARN_DOUBLE_CLOCK, false)
 SCHED_FEAT(RT_PUSH_IPI, true)
 #endif
 
-SCHED_FEAT(RT_RUNTIME_SHARE, true)
+SCHED_FEAT(RT_RUNTIME_SHARE, false)
 SCHED_FEAT(LB_MIN, false)
 SCHED_FEAT(ATTACH_AGE_LOAD, true)
 
@@ -90,3 +119,10 @@ SCHED_FEAT(WA_BIAS, true)
  * UtilEstimation. Use estimated CPU utilization.
  */
 SCHED_FEAT(UTIL_EST, true)
+
+SCHED_FEAT(LATENCY_WARN, false)
+
+/*
+ * Do newidle balancing proportional to its success rate using randomization.
+ */
+SCHED_FEAT(NI_RANDOM, true)
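For context on how the flags above are consumed: features.h is a pure x-macro list, and the rest of the scheduler expands it several times to derive both the feature indices and the default-enabled bitmask. Below is a condensed sketch of that pattern as it appears in kernel/sched/sched.h; the CONFIG_JUMP_LABEL static-key fast path is omitted, and sysctl_sched_features is shown as const for brevity (in a debug build it is writable).

/* First expansion: turn each SCHED_FEAT(name, enabled) entry into an
 * enum constant, giving every feature a stable bit index. */
#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

/* Second expansion: fold the defaults into a bitmask. Bit n is set
 * iff feature n defaults to true. */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

static const unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

/* Call sites then test a flag as a plain bit test,
 * e.g. if (sched_feat(PLACE_LAG)) ... */
#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

On kernels built with CONFIG_SCHED_DEBUG, the same list is also exported writable through debugfs (/sys/kernel/debug/sched/features on recent kernels): writing a feature name enables it, and writing its NO_-prefixed form, e.g. NO_DELAY_DEQUEUE, disables it at runtime.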
