path: root/include/linux/preempt.h
author		Peter Zijlstra <peterz@infradead.org>	2024-10-04 14:46:58 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2024-11-05 12:55:38 +0100
commit		7c70cb94d29cd325fabe4a818c18613e3b9919a1 (patch)
tree		2c4d6b58f7e284778060e2aff70db2b87d7a977c /include/linux/preempt.h
parent		26baa1f1c4bdc34b8d698c1900b407d863ad0e69 (diff)
sched: Add Lazy preemption model
Change fair to use resched_curr_lazy(), which, when the lazy
preemption model is selected, will set TIF_NEED_RESCHED_LAZY.

This LAZY bit will be promoted to the full NEED_RESCHED bit on tick.
As such, the average delay between setting LAZY and actually
rescheduling will be TICK_NSEC/2.

In short, Lazy preemption will delay preemption for fair class but
will function as Full preemption for all the other classes, most
notably the realtime (RR/FIFO/DEADLINE) classes.

The goal is to bridge the performance gap with Voluntary, such that we
might eventually remove that option entirely.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lkml.kernel.org/r/20241007075055.331243614@infradead.org
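As an aside on the mechanism: the LAZY bit is set at an arbitrary point
within a tick period and only promoted at the next tick, so the expected
extra delay is half a period, i.e. TICK_NSEC/2. A minimal sketch of that
promotion step follows — the function name tick_promote_lazy_resched() is
hypothetical and this is not the patch's actual tick path, though
TIF_NEED_RESCHED_LAZY, test_tsk_thread_flag() and set_tsk_need_resched()
are real kernel symbols:

#include <linux/sched.h>

/*
 * Sketch only, not the actual implementation: promote a pending lazy
 * reschedule request to a full one from the scheduler tick.
 */
static void tick_promote_lazy_resched(struct task_struct *curr)
{
	/* LAZY was set earlier by resched_curr_lazy()... */
	if (test_tsk_thread_flag(curr, TIF_NEED_RESCHED_LAZY))
		/* ...promote it so the task reschedules at this tick. */
		set_tsk_need_resched(curr);
}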
Diffstat (limited to 'include/linux/preempt.h')
-rw-r--r--	include/linux/preempt.h | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index ce76f1a45722..ca86235ac15c 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -486,6 +486,7 @@ DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
 extern bool preempt_model_none(void);
 extern bool preempt_model_voluntary(void);
 extern bool preempt_model_full(void);
+extern bool preempt_model_lazy(void);
 
 #else
 
@@ -502,6 +503,11 @@ static inline bool preempt_model_full(void)
 	return IS_ENABLED(CONFIG_PREEMPT);
 }
 
+static inline bool preempt_model_lazy(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_LAZY);
+}
+
 #endif
 
 static inline bool preempt_model_rt(void)
@@ -519,7 +525,7 @@ static inline bool preempt_model_rt(void)
  */
 static inline bool preempt_model_preemptible(void)
 {
-	return preempt_model_full() || preempt_model_rt();
+	return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
 }
 
 #endif /* __LINUX_PREEMPT_H */
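After this change, code that asks "is this kernel preemptible?" via
preempt_model_preemptible() also covers the lazy model. A hedged usage
sketch, not part of the patch — example_report_model() is a hypothetical
caller, while the query helpers are the ones declared above:

#include <linux/preempt.h>
#include <linux/printk.h>

/* Hypothetical caller showing the model-query helpers in use. */
static void example_report_model(void)
{
	if (preempt_model_lazy())
		pr_info("preempt: lazy model selected\n");

	/* True under full, lazy, or RT preemption after this patch. */
	if (preempt_model_preemptible())
		pr_info("preempt: kernel is preemptible\n");
}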