author    Steven Rostedt (Google) <rostedt@goodmis.org>    2022-12-12 10:37:03 -0500
committer Steven Rostedt (Google) <rostedt@goodmis.org>    2022-12-12 10:54:48 -0500
commit    3e12758392bee50135301b0189c064ab80980aca (patch)
tree      6b7396edb304412a33c7dd7babde585145b79bb9 /arch/x86/mm/kmmio.c
parent    c1ac03af6ed45d05786c219d102f37eb44880f28 (diff)
x86/mm/kmmio: Remove redundant preempt_disable()
Now that kmmio uses rcu_read_lock_sched_notrace(), there is no reason to also call preempt_disable(): rcu_read_lock_sched_notrace() already disables preemption, which makes the explicit call redundant. This also removes the paired preempt_enable_no_resched(), whose "no_resched" behavior was bogus here, as there is no reason to suppress the reschedule check at that point.

Link: https://lkml.kernel.org/r/20221212103703.7129cc5d@gandalf.local.home

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "x86@kernel.org" <x86@kernel.org>
Cc: Karol Herbst <karolherbst@gmail.com>
Cc: Pekka Paalanen <ppaalanen@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
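For context, why the preempt_disable()/preempt_enable_no_resched() pair is redundant can be seen in the helpers themselves. Below is a sketch paraphrased from include/linux/rcupdate.h and include/linux/preempt.h around this era of the kernel; exact bodies vary by kernel version and config, so treat this as illustrative rather than authoritative:

/*
 * Sketch: rcu_read_lock_sched_notrace() already disables preemption
 * on entry, so a separate preempt_disable() before it does nothing
 * except bump the preempt count a second time.
 */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);		/* sparse annotation only */
}

static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();	/* includes the reschedule check */
}

/*
 * Sketch: the "no_resched" variant drops the preempt count without
 * checking whether a reschedule became pending while preemption was
 * off, which is why using it here had no justification.
 */
#define preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

Since rcu_read_unlock_sched_notrace() already ends the critical section via preempt_enable_notrace(), which does run the pending-reschedule check, dropping the explicit preempt_enable_no_resched() loses nothing and removes a window where a needed reschedule could have been skipped.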
Diffstat (limited to 'arch/x86/mm/kmmio.c')
-rw-r--r--  arch/x86/mm/kmmio.c  13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 853c49877c16..9f82019179e1 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -246,14 +246,13 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	page_base &= page_level_mask(l);
 
 	/*
-	 * Preemption is now disabled to prevent process switch during
-	 * single stepping. We can only handle one active kmmio trace
+	 * Hold the RCU read lock over single stepping to avoid looking
+	 * up the probe and kmmio_fault_page again. The rcu_read_lock_sched()
+	 * also disables preemption and prevents process switch during
+	 * the single stepping. We can only handle one active kmmio trace
 	 * per cpu, so ensure that we finish it before something else
-	 * gets to run. We also hold the RCU read lock over single
-	 * stepping to avoid looking up the probe and kmmio_fault_page
-	 * again.
+	 * gets to run.
 	 */
-	preempt_disable();
 	rcu_read_lock_sched_notrace();
 
 	faultpage = get_kmmio_fault_page(page_base);
@@ -324,7 +323,6 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 
 no_kmmio:
 	rcu_read_unlock_sched_notrace();
-	preempt_enable_no_resched();
 	return ret;
 }
 
@@ -364,7 +362,6 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	ctx->active--;
 	BUG_ON(ctx->active);
 	rcu_read_unlock_sched_notrace();
-	preempt_enable_no_resched();
 
 	/*
 	 * if somebody else is singlestepping across a probe point, flags