author	Ard Biesheuvel <ardb@kernel.org>	2023-04-12 09:50:20 +0100
committer	Russell King (Oracle) <rmk+kernel@armlinux.org.uk>	2023-04-12 10:04:56 +0100
commit	c76c6c4ecbec0deb56a4f9e932b26866024a508f (patch)
tree	e6da7ee937a9e9ee7b29dcab602a24d2ee974714
parent	3a2bdad0b46649cc73fb3b3f9e2b91ef97a7fa63 (diff)
ARM: 9294/2: vfp: Fix broken softirq handling with instrumentation enabled
Commit 62b95a7b44d1 ("ARM: 9282/1: vfp: Manipulate task VFP state with softirqs disabled") replaced the en/disable preemption calls inside the VFP state handling code with en/disabling of soft IRQs, which is necessary to allow kernel use of the VFP/SIMD unit when handling a soft IRQ.

Unfortunately, when lockdep is enabled (or other instrumentation that enables TRACE_IRQFLAGS), the disable path implemented in asm fails to perform the lockdep and RCU related bookkeeping, resulting in spurious warnings and other badness.

So let's rework the VFP entry code a little bit so we can make the local_bh_disable() call from C, with all the instrumentation that happens to have been configured. Calling local_bh_enable() can be done from asm, as it is a simple wrapper around __local_bh_enable_ip(), which is always a callable function.

Link: https://lore.kernel.org/all/ZBBYCSZUJOWBg1s8@localhost.localdomain/
Fixes: 62b95a7b44d1 ("ARM: 9282/1: vfp: Manipulate task VFP state with softirqs disabled")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
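In outline, the new entry path follows the pattern sketched below (a condensed, illustrative version of the vfp_entry() wrapper added by this patch, not the full diff): softirqs are disabled from C, so lockdep/RCU bookkeeping happens, before jumping into the asm handler; the asm side later re-enables them by tail-calling __local_bh_enable_ip().

    /* Condensed sketch of the pattern this patch introduces. */
    static bool have_vfp __ro_after_init;   /* set once VFP support is probed */

    asmlinkage void vfp_entry(u32 trigger, struct thread_info *ti,
                              u32 resume_pc, u32 resume_return_address)
    {
            if (unlikely(!have_vfp))
                    return;                 /* no VFP: take the undef-instruction path */

            local_bh_disable();             /* C call: full TRACE_IRQFLAGS/lockdep bookkeeping */
            vfp_support_entry(trigger, ti, resume_pc, resume_return_address);
            /* vfp_support_entry() ends by tail-calling __local_bh_enable_ip() in asm */
    }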
-rw-r--r--	arch/arm/include/asm/assembler.h	13
-rw-r--r--	arch/arm/vfp/entry.S	11
-rw-r--r--	arch/arm/vfp/vfphw.S	12
-rw-r--r--	arch/arm/vfp/vfpmodule.c	27
4 files changed, 29 insertions(+), 34 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 06b48ce23e1c..505a306e0271 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -244,19 +244,6 @@ THUMB( fpreg .req r7 )
.endm
#endif
- .macro local_bh_disable, ti, tmp
- ldr \tmp, [\ti, #TI_PREEMPT]
- add \tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
- str \tmp, [\ti, #TI_PREEMPT]
- .endm
-
- .macro local_bh_enable_ti, ti, tmp
- get_thread_info \ti
- ldr \tmp, [\ti, #TI_PREEMPT]
- sub \tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
- str \tmp, [\ti, #TI_PREEMPT]
- .endm
-
#define USERL(l, x...) \
9999: x; \
.pushsection __ex_table,"a"; \
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 6dabb4761778..7483ef8bccda 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -24,14 +24,5 @@
ENTRY(do_vfp)
mov r1, r10
mov r3, r9
- ldr r4, .LCvfp
- ldr pc, [r4] @ call VFP entry point
+ b vfp_entry
ENDPROC(do_vfp)
-
-ENTRY(vfp_null_entry)
- ret lr
-ENDPROC(vfp_null_entry)
-
- .align 2
-.LCvfp:
- .word vfp_vector
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 60acd42e0578..4d8478264d82 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -75,8 +75,6 @@
@ lr = unrecognised instruction return address
@ IRQs enabled.
ENTRY(vfp_support_entry)
- local_bh_disable r1, r4
-
ldr r11, [r1, #TI_CPU] @ CPU number
add r10, r1, #TI_VFPSTATE @ r10 = workspace
@@ -179,9 +177,12 @@ vfp_hw_state_valid:
@ else it's one 32-bit instruction, so
@ always subtract 4 from the following
@ instruction address.
- local_bh_enable_ti r10, r4
- ret r3 @ we think we have handled things
+ mov lr, r3 @ we think we have handled things
+local_bh_enable_and_ret:
+ adr r0, .
+ mov r1, #SOFTIRQ_DISABLE_OFFSET
+ b __local_bh_enable_ip @ tail call
look_for_VFP_exceptions:
@ Check for synchronous or asynchronous exception
@@ -204,8 +205,7 @@ skip:
@ not recognised by VFP
DBGSTR "not VFP"
- local_bh_enable_ti r10, r4
- ret lr
+ b local_bh_enable_and_ret
process_exception:
DBGSTR "bounce"
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 01bc48d73847..349dcb944a93 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -32,10 +32,9 @@
/*
* Our undef handlers (in entry.S)
*/
-asmlinkage void vfp_support_entry(void);
-asmlinkage void vfp_null_entry(void);
+asmlinkage void vfp_support_entry(u32, void *, u32, u32);
-asmlinkage void (*vfp_vector)(void) = vfp_null_entry;
+static bool have_vfp __ro_after_init;
/*
* Dual-use variable.
@@ -645,6 +644,25 @@ static int vfp_starting_cpu(unsigned int unused)
return 0;
}
+/*
+ * Entered with:
+ *
+ * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
+ * r1 = thread_info pointer
+ * r2 = PC value to resume execution after successful emulation
+ * r3 = normal "successful" return address
+ * lr = unrecognised instruction return address
+ */
+asmlinkage void vfp_entry(u32 trigger, struct thread_info *ti, u32 resume_pc,
+ u32 resume_return_address)
+{
+ if (unlikely(!have_vfp))
+ return;
+
+ local_bh_disable();
+ vfp_support_entry(trigger, ti, resume_pc, resume_return_address);
+}
+
#ifdef CONFIG_KERNEL_MODE_NEON
static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
@@ -798,7 +816,6 @@ static int __init vfp_init(void)
vfpsid = fmrx(FPSID);
barrier();
unregister_undef_hook(&vfp_detect_hook);
- vfp_vector = vfp_null_entry;
pr_info("VFP support v0.3: ");
if (VFP_arch) {
@@ -883,7 +900,7 @@ static int __init vfp_init(void)
"arm/vfp:starting", vfp_starting_cpu,
vfp_dying_cpu);
- vfp_vector = vfp_support_entry;
+ have_vfp = true;
thread_register_notifier(&vfp_notifier_block);
vfp_pm_init();