From 924aed1646bf2859726f7e5dd9265ba14358fbb8 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 12 Jan 2023 20:43:28 +0100
Subject: cpuidle, cpu_pm: Remove RCU fiddling from cpu_pm_{enter,exit}()

All callers should still have RCU enabled.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Tested-by: Tony Lindgren
Tested-by: Ulf Hansson
Reviewed-by: Ulf Hansson
Acked-by: Mark Rutland
Acked-by: Rafael J. Wysocki
Acked-by: Frederic Weisbecker
Link: https://lore.kernel.org/r/20230112195540.190860672@infradead.org
---
 kernel/cpu_pm.c | 9 ---------
 1 file changed, 9 deletions(-)

(limited to 'kernel/cpu_pm.c')

diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index ba4ba71facf9..b0f0d15085db 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -30,16 +30,9 @@ static int cpu_pm_notify(enum cpu_pm_event event)
 {
 	int ret;
 
-	/*
-	 * This introduces a RCU read critical section, which could be
-	 * disfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
-	 * this.
-	 */
-	ct_irq_enter_irqson();
 	rcu_read_lock();
 	ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
 	rcu_read_unlock();
-	ct_irq_exit_irqson();
 
 	return notifier_to_errno(ret);
 }
@@ -49,11 +42,9 @@ static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event ev
 	unsigned long flags;
 	int ret;
 
-	ct_irq_enter_irqson();
 	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
 	ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
 	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
-	ct_irq_exit_irqson();
 
 	return notifier_to_errno(ret);
 }
--
cgit
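
For reference, a sketch of how cpu_pm_notify() reads once this patch is applied, reconstructed
from the hunk context above; the explanatory comment is an addition here, not part of the
kernel source, and only restates the commit's rationale that callers now enter with RCU enabled:

static int cpu_pm_notify(enum cpu_pm_event event)
{
	int ret;

	/*
	 * Callers are expected to have RCU watching, so a plain RCU
	 * read-side critical section is sufficient; no context-tracking
	 * (ct_irq_enter_irqson/ct_irq_exit_irqson) wrappers are needed.
	 */
	rcu_read_lock();
	ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
	rcu_read_unlock();

	return notifier_to_errno(ret);
}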