author    Benjamin Gray <bgray@linux.ibm.com>      2023-08-29 16:34:55 +1000
committer Michael Ellerman <mpe@ellerman.id.au>   2023-09-18 12:23:47 +1000
commit    cc879ab3ce39bc39f9b1d238b283f43a5f6f957d
tree      8110700ed8845eb60e09ab728ebfecb578a18ac0
parent    4ff3ba4db5943cac1045e3e4a3c0463ea10f6930
powerpc/watchpoints: Disable preemption in thread_change_pc()
thread_change_pc() uses CPU-local data, so it must be protected from
swapping CPUs while it is reading the breakpoint struct.

The error is more noticeable after commit 1e60f3564bad
("powerpc/watchpoints: Track perf single step directly on the
breakpoint"), which added an unconditional __this_cpu_read() call in
thread_change_pc(). However, the existing __this_cpu_read() that runs
if a breakpoint does need to be re-inserted has the same issue.

Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230829063457.54157-2-bgray@linux.ibm.com
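For readers unfamiliar with why the __this_cpu_read() calls are unsafe
here: a per-CPU read is only meaningful while the task stays on that
CPU. Below is a minimal sketch of the general pattern the fix applies,
not the patch itself; example_bp_slot and example_read_slots are
hypothetical names, while DEFINE_PER_CPU, __this_cpu_read() and
preempt_disable()/preempt_enable() are the real kernel APIs.

/* Sketch: safely reading a per-CPU breakpoint slot. */
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU slot, standing in for bp_per_reg[]. */
static DEFINE_PER_CPU(struct perf_event *, example_bp_slot);

static void example_read_slots(void)
{
	struct perf_event *bp;

	/*
	 * Pin the task to the current CPU. Without this, the scheduler
	 * could migrate the task between the read below and any later
	 * use of bp, leaving us acting on another CPU's breakpoint
	 * state.
	 */
	preempt_disable();

	bp = __this_cpu_read(example_bp_slot);
	/* ... inspect or update bp; still this CPU's slot ... */

	preempt_enable();
}

Disabling preemption (rather than taking a lock) is the usual choice
here because the data is only ever touched by code running on its
owning CPU, so preventing migration is sufficient.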
Diffstat (limited to 'arch/powerpc/kernel/hw_breakpoint.c')
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index b8513dc3e53a..2854376870cf 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -230,13 +230,15 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
 	struct arch_hw_breakpoint *info;
 	int i;
 
+	preempt_disable();
+
 	for (i = 0; i < nr_wp_slots(); i++) {
 		struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);
 
 		if (unlikely(bp && counter_arch_bp(bp)->perf_single_step))
 			goto reset;
 	}
-	return;
+	goto out;
 
 reset:
 	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
@@ -245,6 +247,9 @@ reset:
 		__set_breakpoint(i, info);
 		info->perf_single_step = false;
 	}
+
+out:
+	preempt_enable();
 }
 
 static bool is_larx_stcx_instr(int type)