From 91ea62d58bd661827c328a2c6c02a87fa4aae88b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 18 Dec 2020 16:39:14 +0100
Subject: softirq: Avoid bad tracing / lockdep interaction

Similar to commit:

  1a63dcd8765b ("softirq: Reorder trace_softirqs_on to prevent lockdep splat")

__local_bh_enable_ip() can also call into tracing with inconsistent
state. Unlike that commit we don't need to bother about the tracepoint
because 'cnt-1' never matches preempt_count() (by construction).

Reported-by: Heiko Carstens
Signed-off-by: Peter Zijlstra (Intel)
Tested-by: Heiko Carstens
Link: https://lkml.kernel.org/r/20201218154519.GW3092@hirez.programming.kicks-ass.net
---
 kernel/softirq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 09229ad82209..0f1d3a32d53b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -185,7 +185,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
 	 */
-	preempt_count_sub(cnt - 1);
+	__preempt_count_sub(cnt - 1);
 
 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
 		/*
--
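
For context (not part of the patch): below is a minimal, stand-alone C model of
the interaction being avoided. All model_* names and the offset value are
invented for illustration; the kernel's real preempt_count_sub() and
__preempt_count_sub() live in include/linux/preempt.h and kernel/sched/core.c
and differ in detail. The point is only that the instrumented decrement calls
out to hooks that expect to observe a consistent count, while the raw variant
does not, and __local_bh_enable_ip() deliberately leaves the count in a
transient state between its two decrements.

/*
 * Stand-alone model (user-space C, not kernel code).
 *
 * A "traced" decrement calls a hook that samples the count; the "raw"
 * decrement does not.  The bh-enable path drops 'cnt - 1' first and the
 * final unit later, so the intermediate value is intentionally never a
 * valid preempt_count() -- the hook must not run at that point.
 */
#include <stdio.h>

#define MODEL_SOFTIRQ_DISABLE_OFFSET 2	/* made-up value for the model */

static int model_preempt_count;

/* raw variant: just the arithmetic, no instrumentation */
static void model_raw_count_sub(int val)
{
	model_preempt_count -= val;
}

/* stand-in for the tracing/lockdep callouts */
static void model_trace_hook(void)
{
	/* a real hook samples preempt_count() and may warn/assert on it */
	printf("hook sees count=%d\n", model_preempt_count);
}

/* instrumented variant: decrement, then call out to the hook */
static void model_traced_count_sub(int val)
{
	model_raw_count_sub(val);
	model_trace_hook();
}

static void model_local_bh_enable(int cnt)
{
	/*
	 * Drop all but one unit now with the raw variant, so the hook
	 * never sees the transient count; the last unit goes via the
	 * instrumented path once the count is consistent again.
	 */
	model_raw_count_sub(cnt - 1);
	/* ... softirq processing would run here ... */
	model_traced_count_sub(1);
}

int main(void)
{
	model_preempt_count = MODEL_SOFTIRQ_DISABLE_OFFSET;
	model_local_bh_enable(MODEL_SOFTIRQ_DISABLE_OFFSET);
	return 0;
}

Built with e.g. "cc -Wall model.c", the hook only ever reports the final,
consistent value of the model count, mirroring why the patch switches the
intermediate decrement to the raw __preempt_count_sub().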