author    Wander Lairson Costa <wander@redhat.com>  2023-06-14 09:23:22 -0300
committer Peter Zijlstra <peterz@infradead.org>  2023-07-13 15:21:48 +0200
commit    893cdaaa3977be6afb3a7f756fbfd7be83f68d8c (patch)
tree      a97d1a636213462cff5f969f3db05cbe34833b16 /include/linux/sched
parent    d243b34459cea30cfe5f3a9b2feb44e7daff9938 (diff)
sched: avoid false lockdep splat in put_task_struct()
In the stock kernel, put_task_struct() indirectly acquires a spin_lock. When running the kernel in the real-time (RT) configuration, the operation is instead dispatched to a preemptible context to guarantee preemption. However, if PROVE_RAW_LOCK_NESTING is enabled and __put_task_struct() is called while holding a raw_spinlock, lockdep incorrectly reports an "Invalid lock context" splat in the stock kernel.

This false splat occurs because lockdep is unaware of the different route taken under RT. To address this issue, override the inner wait type to prevent the false lockdep splat.

Suggested-by: Oleg Nesterov <oleg@redhat.com>
Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Wander Lairson Costa <wander@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230614122323.37957-3-wander@redhat.com
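[Editorial note] The override mechanism used in the patch below is lockdep's wait-type override map: a static lock map declared with DEFINE_WAIT_OVERRIDE_MAP(..., LD_WAIT_SLEEP) is "acquired" with lock_map_acquire_try() around a section, telling lockdep to validate whatever nests inside against the sleeping wait type, i.e. against the route RT actually takes. A minimal sketch of the pattern; the lockdep helpers are the real kernel API, but struct my_obj and my_free_locked() are hypothetical names used only for illustration:

	#include <linux/lockdep.h>

	struct my_obj;				/* hypothetical object type */
	void my_free_locked(struct my_obj *o);	/* hypothetical; takes spinlocks,
						 * which sleep under PREEMPT_RT */

	/* Validate this section against LD_WAIT_SLEEP instead of the
	 * wait type lockdep would otherwise infer at the call site.
	 */
	static DEFINE_WAIT_OVERRIDE_MAP(my_free_map, LD_WAIT_SLEEP);

	static void my_free(struct my_obj *o)
	{
		lock_map_acquire_try(&my_free_map);	/* enter override scope */
		my_free_locked(o);			/* checked as LD_WAIT_SLEEP */
		lock_map_release(&my_free_map);		/* leave override scope */
	}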
Diffstat (limited to 'include/linux/sched')
 include/linux/sched/task.h | 18 ++++++++++++------
 1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 6b687c155fb6..a23af225c898 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -126,6 +126,19 @@ static inline void put_task_struct(struct task_struct *t)
 		return;
 
 	/*
+	 * In !RT, it is always safe to call __put_task_struct().
+	 * Under RT, we can only call it in preemptible context.
+	 */
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+		static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);
+
+		lock_map_acquire_try(&put_task_map);
+		__put_task_struct(t);
+		lock_map_release(&put_task_map);
+		return;
+	}
+
+	/*
 	 * under PREEMPT_RT, we can't call put_task_struct
 	 * in atomic context because it will indirectly
 	 * acquire sleeping locks.
@@ -145,10 +158,7 @@ static inline void put_task_struct(struct task_struct *t)
 	 * when it fails to fork a process. Therefore, there is no
 	 * way it can conflict with put_task_struct().
 	 */
-	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
-		call_rcu(&t->rcu, __put_task_struct_rcu_cb);
-	else
-		__put_task_struct(t);
+	call_rcu(&t->rcu, __put_task_struct_rcu_cb);
 }
 
 DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
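
[Editorial note] For reference, here is put_task_struct() as it reads with both hunks applied. This is a reconstruction: the opening refcount check and the long RT comment are paraphrased from surrounding context rather than quoted from the diff:

	static inline void put_task_struct(struct task_struct *t)
	{
		if (!refcount_dec_and_test(&t->usage))
			return;

		/*
		 * In !RT, it is always safe to call __put_task_struct().
		 * Under RT, we can only call it in preemptible context.
		 */
		if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
			static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);

			lock_map_acquire_try(&put_task_map);
			__put_task_struct(t);
			lock_map_release(&put_task_map);
			return;
		}

		/*
		 * RT && !preemptible(): __put_task_struct() would take
		 * sleeping locks here, so defer it to an RCU callback,
		 * which runs preemptibly (original comment abridged).
		 */
		call_rcu(&t->rcu, __put_task_struct_rcu_cb);
	}

Note that the patch uses lock_map_acquire_try() rather than lock_map_acquire(), so the dummy map is recorded as a trylock and cannot itself appear as a blocking acquisition in lockdep's dependency graph; it exists purely to override the inner wait type around __put_task_struct().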