path: root/include/linux/preempt.h
author		Thomas Gleixner <tglx@linutronix.de>	2021-09-23 18:54:44 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2021-10-01 13:57:51 +0200
commit		3e9cc688e56cc2abb9b6067f57c8397f6c96d42c (patch)
tree		71b840a99263940d42c5d3fbe357f42817e83813 /include/linux/preempt.h
parent		50e081b96e35e43b65591f40f7376204decd1cb5 (diff)
sched: Make cond_resched_lock() variants RT aware
The __might_resched() checks in the cond_resched_lock() variants use
PREEMPT_LOCK_OFFSET for the preempt count offset check, which takes into
account the preemption disabled by the spin_lock() that is still held at
that point.

On PREEMPT_RT enabled kernels, spin/rw_lock held sections stay preemptible,
which means PREEMPT_LOCK_OFFSET is 0, but that still triggers the
__might_resched() check because the check also takes RCU read side nesting
into account.

On RT enabled kernels spin/read/write_lock() issue rcu_read_lock() to
resemble the !RT semantics, which means that in cond_resched_lock() the
might resched check will see preempt_count() == 0 and
rcu_preempt_depth() == 1.

Introduce PREEMPT_LOCK_SCHED_OFFSET for those might resched checks and map
it depending on CONFIG_PREEMPT_RT.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210923165358.305969211@linutronix.de
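For context, a minimal sketch of what the might resched check in
cond_resched_lock() has to accept while the lock is still held, under the
two configurations described above. The helper __resched_ok_locked() is
hypothetical and not part of this commit; only preempt_count(),
rcu_preempt_depth(), PREEMPT_LOCK_OFFSET and CONFIG_PREEMPT_RT come from
the message and the hunk below.

#include <linux/preempt.h>	/* preempt_count(), PREEMPT_LOCK_OFFSET */
#include <linux/rcupdate.h>	/* rcu_preempt_depth() */
#include <linux/types.h>	/* bool */

/*
 * Illustrative sketch only -- not code added by this commit.
 * Shows the state the __might_resched() check sees inside
 * cond_resched_lock() with the spin_lock() still held.
 */
static inline bool __resched_ok_locked(void)
{
#ifndef CONFIG_PREEMPT_RT
	/* !RT: the held lock disables preemption, no RCU nesting */
	return preempt_count() == PREEMPT_LOCK_OFFSET &&
	       rcu_preempt_depth() == 0;
#else
	/* RT: the lock stays preemptible but maps to rcu_read_lock() */
	return preempt_count() == 0 && rcu_preempt_depth() == 1;
#endif
}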
Diffstat (limited to 'include/linux/preempt.h')
-rw-r--r--	include/linux/preempt.h	5
1 file changed, 3 insertions, 2 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 4d244e295e85..031898b38d06 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -122,9 +122,10 @@
  * The preempt_count offset after spin_lock()
  */
 #if !defined(CONFIG_PREEMPT_RT)
-#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
+#define PREEMPT_LOCK_OFFSET		PREEMPT_DISABLE_OFFSET
 #else
-#define PREEMPT_LOCK_OFFSET	0
+/* Locks on RT do not disable preemption */
+#define PREEMPT_LOCK_OFFSET		0
 #endif
 
 /*