author     Thomas Gleixner <tglx@linutronix.de>  2021-03-09 09:55:57 +0100
committer  Thomas Gleixner <tglx@linutronix.de>  2021-03-17 16:34:11 +0100
commit     47c218dcae6587fb5bce30f1656b13e22391c8e3 (patch)
tree       0577017548da523ecaf141594aefd66291d036e7  /kernel/softirq.c
parent     8b1c04acad082dec76f3f8f7e1fa13493d6cbb79 (diff)
tick/sched: Prevent false positive softirq pending warnings on RT
On RT, a task which has soft interrupts disabled can block on a lock and schedule out to idle while soft interrupts are pending. This triggers the warning in the NOHZ idle code, which complains about going idle with pending soft interrupts. But as the task is blocked, soft interrupt processing is temporarily blocked as well, which means such a warning is a false positive.

To prevent that, check the per-CPU state which indicates that a scheduled-out task has soft interrupts disabled.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210309085727.527563866@linutronix.de
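For context, a minimal sketch of how the NOHZ idle path can consult the new helper to suppress the false positive. The caller shape (a can_stop_idle_tick()-style check in kernel/time/tick-sched.c) and the warning text are illustrative assumptions and not part of the hunk shown below:

	/* Illustrative sketch of an idle-path check, not part of this hunk */
	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		/*
		 * A scheduled-out task may still hold softirq_ctrl::lock with
		 * bottom halves disabled: softirq processing is only deferred,
		 * not lost, so do not warn in that case.
		 */
		if (ratelimit < 10 && !local_bh_blocked() &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ: going idle with softirqs pending %02x\n",
				(unsigned int)local_softirq_pending());
			ratelimit++;
		}
	}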
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--  kernel/softirq.c  15
1 file changed, 15 insertions(+), 0 deletions(-)
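The diffstat above is limited to kernel/softirq.c, so the companion declaration is not shown. A hedged sketch of what one would expect in include/linux/bottom_half.h, with a stub for the !PREEMPT_RT case (an assumption about the rest of the patch, not part of this view):

	#ifdef CONFIG_PREEMPT_RT
	extern bool local_bh_blocked(void);
	#else
	/* Without PREEMPT_RT, BH-disabled tasks never schedule out. */
	static inline bool local_bh_blocked(void) { return false; }
	#endif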
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 1ed1c55aa2a7..5a99696da86a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -141,6 +141,21 @@ static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
.lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
};
+/**
+ * local_bh_blocked() - Check from the idle task whether BH processing is blocked
+ *
+ * Returns false if the per-CPU softirq_ctrl::cnt is 0, otherwise true.
+ *
+ * This is invoked from the idle task to guard against false positive
+ * softirq pending warnings, which would happen when the task which holds
+ * softirq_ctrl::lock was the only running task on the CPU and blocks on
+ * some other lock.
+ */
+bool local_bh_blocked(void)
+{
+ return __this_cpu_read(softirq_ctrl.cnt) != 0;
+}
+
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;