From 8769177658d3559c4323200a719dd456d2f2675a Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Sat, 18 Mar 2023 17:24:27 +0100
Subject: ARM: vfp: Record VFP bounces as perf emulation faults

VFP 'bouncing' occurs when the VFP unit cannot complete the execution of
a VFP instruction, either because it is not implemented at all, or
because the values of the arguments are out of range for the hardware
implementation, and the software needs to step in to complete the
operation.

To give some insight into how much certain programs rely on this
bouncing, record the emulation of a VFP instruction in perf's
emulation-faults counter. This can be used like so:

  perf stat -e emulation-faults ./testfloat -all2

and the output will be something like

 Performance counter stats for './testfloat -all2':

           259,277      emulation-faults:u

       6.846432176 seconds time elapsed

Reviewed-by: Linus Walleij
Signed-off-by: Ard Biesheuvel
---
 arch/arm/vfp/vfpmodule.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 349dcb944a93..08d5dfcf7079 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -313,6 +314,7 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
 	 * emulate it.
 	 */
 	}
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
 	return exceptions & ~VFP_NAN_FLAG;
 }
--
cgit

From 4a0548c6681cd25b8d76e897e01bfb62ce93916d Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 20 Mar 2023 11:01:16 +0100
Subject: ARM: vfp: Remove workaround for Feroceon CPUs

Feroceon CPUs have a non-standard implementation of VFP which reports
synchronous VFP exceptions using the async VFP flag. This requires a
workaround which is difficult to reconcile with other implementations,
making it tricky to support both versions in a single image.

Since this is a v5 CPU, it is not supported by armhf and so the
likelihood that anybody is using this with recent distros/kernels and
relies on the VFP at the same time is extremely low. So let's just
disable VFP support on these cores, so we can remove the workaround.

This will help future development to support v5 and v6 CPUs with a
single kernel image.

Reviewed-by: Linus Walleij
Acked-by: Nicolas Pitre
Acked-by: Arnd Bergmann
Signed-off-by: Ard Biesheuvel
---
 arch/arm/mm/proc-feroceon.S | 4 ++++
 arch/arm/vfp/vfphw.S        | 4 ----
 arch/arm/vfp/vfpmodule.c    | 8 +++++---
 3 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 61ce82aca6f0..072ff9b451f8 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -56,6 +56,10 @@ ENTRY(cpu_feroceon_proc_init)
 	movne	r2, r2, lsr #2			@ turned into # of sets
 	sub	r2, r2, #(1 << 5)
 	stmia	r1, {r2, r3}
+#ifdef CONFIG_VFP
+	mov	r1, #1				@ disable quirky VFP
+	str_l	r1, VFP_arch_feroceon, r2
+#endif
 	ret	lr

 /*
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index a4610d0f3215..0aeb60ac3b53 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -110,7 +110,6 @@ ENTRY(vfp_support_entry)
 	beq	vfp_reload_hw		@ then the hw state needs reloading
 	VFPFSTMIA r4, r5		@ save the working registers
 	VFPFMRX	r5, FPSCR		@ current status
-#ifndef CONFIG_CPU_FEROCEON
 	tst	r1, #FPEXC_EX		@ is there additional state to save?
 	beq	1f
 	VFPFMRX	r6, FPINST		@ FPINST (only if FPEXC.EX is set)
@@ -118,7 +117,6 @@ ENTRY(vfp_support_entry)
 	beq	1f
 	VFPFMRX	r8, FPINST2		@ FPINST2 if needed (and present)
 1:
-#endif
 	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
 vfp_reload_hw:
@@ -153,7 +151,6 @@ vfp_reload_hw:
 	VFPFLDMIA r10, r5		@ reload the working registers while
 					@ FPEXC is in a safe state
 	ldmia	r10, {r1, r5, r6, r8}	@ load FPEXC, FPSCR, FPINST, FPINST2
-#ifndef CONFIG_CPU_FEROCEON
 	tst	r1, #FPEXC_EX		@ is there additional state to restore?
 	beq	1f
 	VFPFMXR	FPINST, r6		@ restore FPINST (only if FPEXC.EX is set)
@@ -161,7 +158,6 @@ vfp_reload_hw:
 	beq	1f
 	VFPFMXR	FPINST2, r8		@ FPINST2 if needed (and present)
 1:
-#endif
 	VFPFMXR	FPSCR, r5		@ restore status

 @ The context stored in the VFP hardware is up to date with this thread
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 08d5dfcf7079..95628e57807b 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -42,7 +42,11 @@ static bool have_vfp __ro_after_init;
  * Used in startup: set to non-zero if VFP checks fail
  * After startup, holds VFP architecture
  */
-static unsigned int __initdata VFP_arch;
+static unsigned int VFP_arch;
+
+#ifdef CONFIG_CPU_FEROCEON
+extern unsigned int VFP_arch_feroceon __alias(VFP_arch);
+#endif

 /*
  * The pointer to the vfpstate structure of the thread which currently
@@ -357,14 +361,12 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	}

 	if (fpexc & FPEXC_EX) {
-#ifndef CONFIG_CPU_FEROCEON
 		/*
 		 * Asynchronous exception. The instruction is read from FPINST
 		 * and the interrupted instruction has to be restarted.
 		 */
 		trigger = fmrx(FPINST);
 		regs->ARM_pc -= 4;
-#endif
 	} else if (!(fpexc & FPEXC_DEX)) {
 		/*
 		 * Illegal combination of bits. It can be caused by an
--
cgit

From 4708fb041346fa9cc6745dafb8c248a3e2f1075b Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 16 Mar 2023 00:51:42 +0100
Subject: ARM: vfp: Reimplement VFP exception entry in C code

En/disabling softirqs from asm code turned out to be trickier than
expected, so vfp_support_entry now returns by tail calling
__local_bh_enable_ip() and passing the same arguments that a C call to
local_bh_enable() would pass. However, this is slightly hacky, as we
don't want to carry our own implementation of local_bh_enable().

So let's bite the bullet, and get rid of the asm logic in
vfp_support_entry that reasons about whether or not to save and/or
reload the VFP state, and about whether or not an FP exception is
pending, and only keep the VFP loading logic as a function that is
callable from C.

Replicate the removed logic in vfp_entry(), and use the exact same
reasoning as in the asm code. To emphasize the correspondence, retain
some of the asm comments in the C version as well.

Signed-off-by: Ard Biesheuvel
Acked-by: Linus Walleij
---
 arch/arm/vfp/entry.S     |  12 +--
 arch/arm/vfp/vfp.h       |   1 +
 arch/arm/vfp/vfphw.S     | 204 ++++------------------------------------------
 arch/arm/vfp/vfpmodule.c | 123 +++++++++++++++++++++++-----
 4 files changed, 124 insertions(+), 216 deletions(-)

diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 62206ef25037..547c94c62cd3 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -22,10 +22,10 @@
 @ IRQs enabled.
 @
 ENTRY(do_vfp)
-	mov	r1, r10
-	str	lr, [sp, #-8]!
-	add	r3, sp, #4
-	str	r9, [r3]
-	bl	vfp_entry
-	ldr	pc, [sp], #8
+	mov	r1, r0			@ pass trigger opcode via R1
+	mov	r0, sp			@ pass struct pt_regs via R0
+	bl	vfp_support_entry	@ dispatch the VFP exception
+	cmp	r0, #0			@ handled successfully?
+	reteq	r9			@ then use R9 as return address
+	ret	lr			@ pass to undef handler
 ENDPROC(do_vfp)
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index 5cd6d5053271..e43a630f8a16 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -375,3 +375,4 @@ struct op {
 };

 asmlinkage void vfp_save_state(void *location, u32 fpexc);
+asmlinkage u32 vfp_load_state(const void *location);
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 0aeb60ac3b53..d5a03f3c10c5 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -4,12 +4,6 @@
  *
  * Copyright (C) 2004 ARM Limited.
  * Written by Deep Blue Solutions Limited.
- *
- * This code is called from the kernel's undefined instruction trap.
- * r1 holds the thread_info pointer
- * r3 holds the return address for successful handling.
- * lr holds the return address for unrecognised instructions.
- * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
  */
 #include
 #include
@@ -19,20 +13,6 @@
 #include
 #include

-	.macro	DBGSTR, str
-#ifdef DEBUG
-	stmfd	sp!, {r0-r3, ip, lr}
-	ldr	r0, =1f
-	bl	_printk
-	ldmfd	sp!, {r0-r3, ip, lr}
-
-	.pushsection .rodata, "a"
-1:	.ascii	KERN_DEBUG "VFP: \str\n"
-	.byte	0
-	.previous
-#endif
-	.endm
-
 	.macro	DBGSTR1, str, arg
 #ifdef DEBUG
 	stmfd	sp!, {r0-r3, ip, lr}
@@ -48,177 +28,25 @@
 #endif
 	.endm

-	.macro	DBGSTR3, str, arg1, arg2, arg3
-#ifdef DEBUG
-	stmfd	sp!, {r0-r3, ip, lr}
-	mov	r3, \arg3
-	mov	r2, \arg2
-	mov	r1, \arg1
-	ldr	r0, =1f
-	bl	_printk
-	ldmfd	sp!, {r0-r3, ip, lr}
-
-	.pushsection .rodata, "a"
-1:	.ascii	KERN_DEBUG "VFP: \str\n"
-	.byte	0
-	.previous
-#endif
-	.endm
-
-
-@ VFP hardware support entry point.
-@
-@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
-@  r1  = thread_info pointer
-@  r2  = PC value to resume execution after successful emulation
-@  r3  = normal "successful" return address
-@  lr  = unrecognised instruction return address
-@  IRQs enabled.
-ENTRY(vfp_support_entry)
-	ldr	r11, [r1, #TI_CPU]	@ CPU number
-	add	r10, r1, #TI_VFPSTATE	@ r10 = workspace
-
-	DBGSTR3	"instr %08x pc %08x state %p", r0, r2, r10
-
-	.fpu	vfpv2
-	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
-	DBGSTR1	"fpexc %08x", r1
-	tst	r1, #FPEXC_EN
-	bne	look_for_VFP_exceptions	@ VFP is already enabled
-
-	DBGSTR1	"enable %x", r10
-	ldr	r9, vfp_current_hw_state_address
-	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
-	ldr	r4, [r9, r11, lsl #2]	@ vfp_current_hw_state pointer
-	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
-	cmp	r4, r10			@ this thread owns the hw context?
-#ifndef CONFIG_SMP
-	@ For UP, checking that this thread owns the hw context is
-	@ sufficient to determine that the hardware state is valid.
-	beq	vfp_hw_state_valid
-
-	@ On UP, we lazily save the VFP context.  As a different
-	@ thread wants ownership of the VFP hardware, save the old
-	@ state if there was a previous (valid) owner.
-
-	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
-					@ exceptions, so we can get at the
-					@ rest of it
-
-	DBGSTR1	"save old state %p", r4
-	cmp	r4, #0			@ if the vfp_current_hw_state is NULL
-	beq	vfp_reload_hw		@ then the hw state needs reloading
-	VFPFSTMIA r4, r5		@ save the working registers
-	VFPFMRX	r5, FPSCR		@ current status
-	tst	r1, #FPEXC_EX		@ is there additional state to save?
-	beq	1f
-	VFPFMRX	r6, FPINST		@ FPINST (only if FPEXC.EX is set)
-	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
-	beq	1f
-	VFPFMRX	r8, FPINST2		@ FPINST2 if needed (and present)
-1:
-	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
-vfp_reload_hw:
-
-#else
-	@ For SMP, if this thread does not own the hw context, then we
-	@ need to reload it.  No need to save the old state as on SMP,
-	@ we always save the state when we switch away from a thread.
-	bne	vfp_reload_hw
-
-	@ This thread has ownership of the current hardware context.
-	@ However, it may have been migrated to another CPU, in which
-	@ case the saved state is newer than the hardware context.
-	@ Check this by looking at the CPU number which the state was
-	@ last loaded onto.
-	ldr	ip, [r10, #VFP_CPU]
-	teq	ip, r11
-	beq	vfp_hw_state_valid
-
-vfp_reload_hw:
-	@ We're loading this threads state into the VFP hardware. Update
-	@ the CPU number which contains the most up to date VFP context.
-	str	r11, [r10, #VFP_CPU]
-
-	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
-					@ exceptions, so we can get at the
-					@ rest of it
-#endif
-
-	DBGSTR1	"load state %p", r10
-	str	r10, [r9, r11, lsl #2]	@ update the vfp_current_hw_state pointer
+ENTRY(vfp_load_state)
+	@ Load the current VFP state
+	@ r0 - load location
+	@ returns FPEXC
+	DBGSTR1	"load VFP state %p", r0
 					@ Load the saved state back into the VFP
-	VFPFLDMIA r10, r5		@ reload the working registers while
+	VFPFLDMIA r0, r1		@ reload the working registers while
 					@ FPEXC is in a safe state
-	ldmia	r10, {r1, r5, r6, r8}	@ load FPEXC, FPSCR, FPINST, FPINST2
-	tst	r1, #FPEXC_EX		@ is there additional state to restore?
+	ldmia	r0, {r0-r3}		@ load FPEXC, FPSCR, FPINST, FPINST2
+	tst	r0, #FPEXC_EX		@ is there additional state to restore?
 	beq	1f
-	VFPFMXR	FPINST, r6		@ restore FPINST (only if FPEXC.EX is set)
-	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
+	VFPFMXR	FPINST, r2		@ restore FPINST (only if FPEXC.EX is set)
+	tst	r0, #FPEXC_FP2V		@ is there an FPINST2 to write?
 	beq	1f
-	VFPFMXR	FPINST2, r8		@ FPINST2 if needed (and present)
+	VFPFMXR	FPINST2, r3		@ FPINST2 if needed (and present)
 1:
-	VFPFMXR	FPSCR, r5		@ restore status
-
-@ The context stored in the VFP hardware is up to date with this thread
-vfp_hw_state_valid:
-	tst	r1, #FPEXC_EX
-	bne	process_exception	@ might as well handle the pending
-					@ exception before retrying branch
-					@ out before setting an FPEXC that
-					@ stops us reading stuff
-	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
-	mov	sp, r3			@ we think we have handled things
-	pop	{lr}
-	sub	r2, r2, #4		@ Retry current instruction - if Thumb
-	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
-					@ else it's one 32-bit instruction, so
-					@ always subtract 4 from the following
-					@ instruction address.
-
-local_bh_enable_and_ret:
-	adr	r0, .
-	mov	r1, #SOFTIRQ_DISABLE_OFFSET
-	b	__local_bh_enable_ip	@ tail call
-
-look_for_VFP_exceptions:
-	@ Check for synchronous or asynchronous exception
-	tst	r1, #FPEXC_EX | FPEXC_DEX
-	bne	process_exception
-	@ On some implementations of the VFP subarch 1, setting FPSCR.IXE
-	@ causes all the CDP instructions to be bounced synchronously without
-	@ setting the FPEXC.EX bit
-	VFPFMRX	r5, FPSCR
-	tst	r5, #FPSCR_IXE
-	bne	process_exception
-
-	tst	r5, #FPSCR_LENGTH_MASK
-	beq	skip
-	orr	r1, r1, #FPEXC_DEX
-	b	process_exception
-skip:
-
-	@ Fall into hand on to next handler - appropriate coproc instr
-	@ not recognised by VFP
-
-	DBGSTR	"not VFP"
-	b	local_bh_enable_and_ret
-
-process_exception:
-	DBGSTR	"bounce"
-	mov	sp, r3			@ setup for a return to the user code.
-	pop	{lr}
-	mov	r2, sp			@ nothing stacked - regdump is at TOS
-
-	@ Now call the C code to package up the bounce to the support code
-	@   r0 holds the trigger instruction
-	@   r1 holds the FPEXC value
-	@   r2 pointer to register dump
-	b	VFP_bounce		@ we have handled this - the support
-					@ code will raise an exception if
-					@ required. If not, the user code will
-					@ retry the faulted instruction
-ENDPROC(vfp_support_entry)
+	VFPFMXR	FPSCR, r1		@ restore status
+	ret	lr
+ENDPROC(vfp_load_state)

 ENTRY(vfp_save_state)
 	@ Save the current VFP state
@@ -238,10 +66,6 @@ ENTRY(vfp_save_state)
 	ret	lr
 ENDPROC(vfp_save_state)

-	.align
-vfp_current_hw_state_address:
-	.word	vfp_current_hw_state
-
 	.macro	tbl_branch, base, tmp, shift
 #ifdef CONFIG_THUMB2_KERNEL
 	adr	\tmp, 1f
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 95628e57807b..7572cb5b28a2 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -30,11 +30,6 @@
 #include "vfpinstr.h"
 #include "vfp.h"

-/*
- * Our undef handlers (in entry.S)
- */
-asmlinkage void vfp_support_entry(u32, void *, u32, u32);
-
 static bool have_vfp __ro_after_init;

 /*
@@ -325,7 +320,7 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
 /*
  * Package up a bounce condition.
  */
-void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
+static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 {
 	u32 fpscr, orig_fpscr, fpsid, exceptions;

@@ -374,7 +369,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	 * on VFP subarch 1.
 	 */
 	vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
-	goto exit;
+	return;
 }

 /*
@@ -405,7 +400,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
 	 */
 	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
-		goto exit;
+		return;

 	/*
 	 * The barrier() here prevents fpinst2 being read
@@ -418,8 +413,6 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
 	if (exceptions)
 		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
- exit:
-	local_bh_enable();
 }

 static void vfp_enable(void *unused)
@@ -649,22 +642,112 @@ static int vfp_starting_cpu(unsigned int unused)
 }

 /*
- * Entered with:
+ * vfp_support_entry - Handle VFP exception from user mode
  *
- * r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
- * r1  = thread_info pointer
- * r2  = PC value to resume execution after successful emulation
- * r3  = normal "successful" return address
- * lr  = unrecognised instruction return address
+ * @regs:	pt_regs structure holding the register state at exception entry
+ * @trigger:	The opcode of the instruction that triggered the exception
+ *
+ * Returns 0 if the exception was handled, or an error code otherwise.
  */
-asmlinkage void vfp_entry(u32 trigger, struct thread_info *ti, u32 resume_pc,
-			  u32 resume_return_address)
+asmlinkage int vfp_support_entry(struct pt_regs *regs, u32 trigger)
 {
+	struct thread_info *ti = current_thread_info();
+	u32 fpexc;
+
 	if (unlikely(!have_vfp))
-		return;
+		return -ENODEV;

 	local_bh_disable();
-	vfp_support_entry(trigger, ti, resume_pc, resume_return_address);
+	fpexc = fmrx(FPEXC);
+
+	/*
+	 * If the VFP unit was not enabled yet, we have to check whether the
+	 * VFP state in the CPU's registers is the most recent VFP state
+	 * associated with the process. On UP systems, we don't save the VFP
+	 * state eagerly on a context switch, so we may need to save the
+	 * VFP state to memory first, as it may belong to another process.
+	 */
+	if (!(fpexc & FPEXC_EN)) {
+		/*
+		 * Enable the VFP unit but mask the FP exception flag for the
+		 * time being, so we can access all the registers.
+		 */
+		fpexc |= FPEXC_EN;
+		fmxr(FPEXC, fpexc & ~FPEXC_EX);
+
+		/*
+		 * Check whether or not the VFP state in the CPU's registers is
+		 * the most recent VFP state associated with this task. On SMP,
+		 * migration may result in multiple CPUs holding VFP states
+		 * that belong to the same task, but only the most recent one
+		 * is valid.
+		 */
+		if (!vfp_state_in_hw(ti->cpu, ti)) {
+			if (!IS_ENABLED(CONFIG_SMP) &&
+			    vfp_current_hw_state[ti->cpu] != NULL) {
+				/*
+				 * This CPU is currently holding the most
+				 * recent VFP state associated with another
+				 * task, and we must save that to memory first.
+				 */
+				vfp_save_state(vfp_current_hw_state[ti->cpu],
+					       fpexc);
+			}
+
+			/*
+			 * We can now proceed with loading the task's VFP state
+			 * from memory into the CPU registers.
+			 */
+			fpexc = vfp_load_state(&ti->vfpstate);
+			vfp_current_hw_state[ti->cpu] = &ti->vfpstate;
+#ifdef CONFIG_SMP
+			/*
+			 * Record that this CPU is now the one holding the most
+			 * recent VFP state of the task.
+			 */
+			ti->vfpstate.hard.cpu = ti->cpu;
+#endif
+		}
+
+		if (fpexc & FPEXC_EX)
+			/*
+			 * Might as well handle the pending exception before
+			 * retrying branch out before setting an FPEXC that
+			 * stops us reading stuff.
+			 */
+			goto bounce;
+
+		/*
+		 * No FP exception is pending: just enable the VFP and
+		 * replay the instruction that trapped.
+		 */
+		fmxr(FPEXC, fpexc);
+		regs->ARM_pc -= 4;
+	} else {
+		/* Check for synchronous or asynchronous exceptions */
+		if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
+			u32 fpscr = fmrx(FPSCR);
+
+			/*
+			 * On some implementations of the VFP subarch 1,
+			 * setting FPSCR.IXE causes all the CDP instructions to
+			 * be bounced synchronously without setting the
+			 * FPEXC.EX bit
+			 */
+			if (!(fpscr & FPSCR_IXE)) {
+				if (!(fpscr & FPSCR_LENGTH_MASK)) {
+					pr_debug("not VFP\n");
+					local_bh_enable();
+					return -ENOEXEC;
+				}
+				fpexc |= FPEXC_DEX;
+			}
+		}
+bounce:		VFP_bounce(trigger, fpexc, regs);
+	}
+
+	local_bh_enable();
+	return 0;
 }

 #ifdef CONFIG_KERNEL_MODE_NEON
--
cgit

From 6ee1e6772e1e19436f573672de5ff8aab7163be6 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Sun, 19 Mar 2023 23:55:14 +0100
Subject: ARM: kernel: Get rid of thread_info::used_cp[] array

We keep track of which coprocessor triggered a fault in the used_cp[]
array in thread_info, but this data is never used anywhere. So let's
remove it.

Linus did some digging and found out that the last user of this field
was removed in commit bb1a773d5b6b ("kill unused dump_fpu() instances").
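
For the record, the bookkeeping being dropped amounted to no more than
the following (a rough C rendering of the asm hunks below; 'instr' and
'thread' are illustrative names, not actual kernel code):

	/* The coprocessor number sits in bits [11:8] of a coproc instruction. */
	unsigned int cp = (instr & 0x00000f00) >> 8;	/* mask out CP number */
	thread->used_cp[cp] = 1;	/* written on each trap, never read back */
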
Reviewed-by: Linus Walleij
Signed-off-by: Ard Biesheuvel
---
 arch/arm/include/asm/thread_info.h | 1 -
 arch/arm/kernel/asm-offsets.c      | 1 -
 arch/arm/kernel/entry-armv.S       | 6 ------
 arch/arm/kernel/process.c          | 1 -
 arch/arm/kernel/ptrace.c           | 2 --
 5 files changed, 11 deletions(-)

diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 7f092cb55a41..85c5f1e02ebf 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -66,7 +66,6 @@ struct thread_info {
 	__u32			cpu_domain;	/* cpu domain */
 	struct cpu_context_save	cpu_context;	/* cpu context */
 	__u32			abi_syscall;	/* ABI type and syscall nr */
-	__u8			used_cp[16];	/* thread used copro */
 	unsigned long		tp_value[2];	/* TLS registers */
 	union fp_state		fpstate __attribute__((aligned(8)));
 	union vfp_state		vfpstate;
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 38121c59cbc2..f9c7111c1d65 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -47,7 +47,6 @@ int main(void)
   DEFINE(TI_CPU_DOMAIN,	offsetof(struct thread_info, cpu_domain));
   DEFINE(TI_CPU_SAVE,	offsetof(struct thread_info, cpu_context));
   DEFINE(TI_ABI_SYSCALL,	offsetof(struct thread_info, abi_syscall));
-  DEFINE(TI_USED_CP,	offsetof(struct thread_info, used_cp));
   DEFINE(TI_TP_VALUE,	offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,	offsetof(struct thread_info, fpstate));
 #ifdef CONFIG_VFP
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index c39303e5c234..ba47f6aac5ff 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -591,9 +591,6 @@ call_fpe:
 	and	r8, r0, r5
 	cmp	r8, r7			@ NEON instruction?
 	bne	2b
-	mov	r7, #1
-	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
-	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
 	b	do_vfp			@ let VFP handler handle this
 1:
 #endif
@@ -601,9 +598,6 @@ call_fpe:
 	tstne	r0, #0x04000000		@ bit 26 set on both ARM and Thumb-2
 	reteq	lr
 	and	r8, r0, #0x00000f00	@ mask out CP number
-	mov	r7, #1
-	add	r6, r10, r8, lsr #8	@ add used_cp[] array offset first
-	strb	r7, [r6, #TI_USED_CP]	@ set appropriate used_cp[]
 #ifdef CONFIG_IWMMXT
 	@ Test if we need to give access to iWMMXt coprocessors
 	ldr	r5, [r10, #TI_FLAGS]
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 0e8ff85890ad..e16ed102960c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -222,7 +222,6 @@ void flush_thread(void)

 	flush_ptrace_hw_breakpoint(tsk);

-	memset(thread->used_cp, 0, sizeof(thread->used_cp));
 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
 	memset(&thread->fpstate, 0, sizeof(union fp_state));

diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 2d8e2516906b..2b945b9bd366 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -584,8 +584,6 @@ static int fpa_set(struct task_struct *target,
 {
 	struct thread_info *thread = task_thread_info(target);

-	thread->used_cp[1] = thread->used_cp[2] = 1;
-
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				  &thread->fpstate,
 				  0, sizeof(struct user_fp));
--
cgit

From cdd87465adfd75e4ebd11507575533c6bf7a5525 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Sun, 19 Mar 2023 00:28:35 +0100
Subject: ARM: vfp: Use undef hook for handling VFP exceptions

Now that the VFP support code has been reimplemented as a C function
that takes a struct pt_regs pointer and an opcode, we can use the
existing undef_hook framework to deal with undef exceptions triggered
by VFP instructions instead of having special handling in assembler.

Reviewed-by: Linus Walleij
Signed-off-by: Ard Biesheuvel
---
 arch/arm/kernel/entry-armv.S |  53 ---------------------
 arch/arm/vfp/Makefile        |   2 +-
 arch/arm/vfp/entry.S         |  31 ------------
 arch/arm/vfp/vfpmodule.c     | 109 +++++++++++++++++++++----------------------
 4 files changed, 54 insertions(+), 141 deletions(-)
 delete mode 100644 arch/arm/vfp/entry.S

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index ba47f6aac5ff..0e40b2566f59 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -557,13 +557,6 @@ ENDPROC(__und_usr)
  * co-processor instructions.  However, we have to watch out
  * for the ARM6/ARM7 SWI bug.
  *
- * NEON is a special case that has to be handled here. Not all
- * NEON instructions are co-processor instructions, so we have
- * to make a special case of checking for them. Plus, there's
- * five groups of them, so we have a table of mask/opcode pairs
- * to check against, and if any match then we branch off into the
- * NEON handler code.
- *
  * Emulators may wish to make use of the following registers:
  *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
  *  r2  = PC value to resume execution after successful emulation
@@ -575,25 +568,8 @@ ENDPROC(__und_usr)
 @
 @ Fall-through from Thumb-2 __und_usr
 @
-#ifdef CONFIG_NEON
-	get_thread_info r10		@ get current thread
-	adr	r6, .LCneon_thumb_opcodes
-	b	2f
-#endif
 call_fpe:
 	get_thread_info r10		@ get current thread
-#ifdef CONFIG_NEON
-	adr	r6, .LCneon_arm_opcodes
-2:	ldr	r5, [r6], #4		@ mask value
-	ldr	r7, [r6], #4		@ opcode bits matching in mask
-	cmp	r5, #0			@ end mask?
-	beq	1f
-	and	r8, r0, r5
-	cmp	r8, r7			@ NEON instruction?
-	bne	2b
-	b	do_vfp			@ let VFP handler handle this
-1:
-#endif
 	tst	r0, #0x08000000		@ only CDP/CPRT/LDC/STC have bit 27
 	tstne	r0, #0x04000000		@ bit 26 set on both ARM and Thumb-2
 	reteq	lr
@@ -620,42 +596,13 @@ call_fpe:
 	ret.w	lr				@ CP#7
 	ret.w	lr				@ CP#8
 	ret.w	lr				@ CP#9
-#ifdef CONFIG_VFP
-	W(b)	do_vfp				@ CP#10 (VFP)
-	W(b)	do_vfp				@ CP#11 (VFP)
-#else
 	ret.w	lr				@ CP#10 (VFP)
 	ret.w	lr				@ CP#11 (VFP)
-#endif
 	ret.w	lr				@ CP#12
 	ret.w	lr				@ CP#13
 	ret.w	lr				@ CP#14 (Debug)
 	ret.w	lr				@ CP#15 (Control)

-#ifdef CONFIG_NEON
-	.align	6
-
-.LCneon_arm_opcodes:
-	.word	0xfe000000			@ mask
-	.word	0xf2000000			@ opcode
-
-	.word	0xff100000			@ mask
-	.word	0xf4000000			@ opcode
-
-	.word	0x00000000			@ mask
-	.word	0x00000000			@ opcode
-
-.LCneon_thumb_opcodes:
-	.word	0xef000000			@ mask
-	.word	0xef000000			@ opcode
-
-	.word	0xff100000			@ mask
-	.word	0xf9000000			@ opcode
-
-	.word	0x00000000			@ mask
-	.word	0x00000000			@ opcode
-#endif
-
 do_fpe:
 	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
 	ldr_va	pc, fp_enter, tmp=r4		@ Call FP module USR entry point
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile
index 749901a72d6d..dfd64bc2b2fb 100644
--- a/arch/arm/vfp/Makefile
+++ b/arch/arm/vfp/Makefile
@@ -8,4 +8,4 @@
 # ccflags-y := -DDEBUG
 # asflags-y := -DDEBUG

-obj-y		+= vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
+obj-y		+= vfpmodule.o vfphw.o vfpsingle.o vfpdouble.o
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
deleted file mode 100644
index 547c94c62cd3..000000000000
--- a/arch/arm/vfp/entry.S
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- *  linux/arch/arm/vfp/entry.S
- *
- *  Copyright (C) 2004 ARM Limited.
- *  Written by Deep Blue Solutions Limited.
- */
-#include
-#include
-#include
-#include
-#include
-#include
-
-@ VFP entry point.
-@
-@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
-@  r2  = PC value to resume execution after successful emulation
-@  r9  = normal "successful" return address
-@  r10 = this threads thread_info structure
-@  lr  = unrecognised instruction return address
-@  IRQs enabled.
-@
-ENTRY(do_vfp)
-	mov	r1, r0			@ pass trigger opcode via R1
-	mov	r0, sp			@ pass struct pt_regs via R0
-	bl	vfp_support_entry	@ dispatch the VFP exception
-	cmp	r0, #0			@ handled successfully?
-	reteq	r9			@ then use R9 as return address
-	ret	lr			@ pass to undef handler
-ENDPROC(do_vfp)
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 7572cb5b28a2..58a9442add24 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -641,15 +641,37 @@ static int vfp_starting_cpu(unsigned int unused)
 	return 0;
 }

+static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
+{
+	/*
+	 * If we reach this point, a floating point exception has been raised
+	 * while running in kernel mode. If the NEON/VFP unit was enabled at the
+	 * time, it means a VFP instruction has been issued that requires
+	 * software assistance to complete, something which is not currently
+	 * supported in kernel mode.
+	 * If the NEON/VFP unit was disabled, and the location pointed to below
+	 * is properly preceded by a call to kernel_neon_begin(), something has
+	 * caused the task to be scheduled out and back in again. In this case,
+	 * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
+	 * be helpful in localizing the problem.
+	 */
+	if (fmrx(FPEXC) & FPEXC_EN)
+		pr_crit("BUG: unsupported FP instruction in kernel mode\n");
+	else
+		pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
+	pr_crit("FPEXC == 0x%08x\n", fmrx(FPEXC));
+	return 1;
+}
+
 /*
- * vfp_support_entry - Handle VFP exception from user mode
+ * vfp_support_entry - Handle VFP exception
  *
  * @regs:	pt_regs structure holding the register state at exception entry
  * @trigger:	The opcode of the instruction that triggered the exception
  *
  * Returns 0 if the exception was handled, or an error code otherwise.
  */
-asmlinkage int vfp_support_entry(struct pt_regs *regs, u32 trigger)
+static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
 {
 	struct thread_info *ti = current_thread_info();
 	u32 fpexc;
@@ -657,6 +679,9 @@ asmlinkage int vfp_support_entry(struct pt_regs *regs, u32 trigger)
 	if (unlikely(!have_vfp))
 		return -ENODEV;

+	if (!user_mode(regs))
+		return vfp_kmode_exception(regs, trigger);
+
 	local_bh_disable();
 	fpexc = fmrx(FPEXC);

@@ -722,7 +747,6 @@ asmlinkage int vfp_support_entry(struct pt_regs *regs, u32 trigger)
 	 * replay the instruction that trapped.
 	 */
 	fmxr(FPEXC, fpexc);
-	regs->ARM_pc -= 4;
 	} else {
 		/* Check for synchronous or asynchronous exceptions */
 		if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
@@ -743,78 +767,47 @@ asmlinkage int vfp_support_entry(struct pt_regs *regs, u32 trigger)
 				fpexc |= FPEXC_DEX;
 			}
 		}
-bounce:		VFP_bounce(trigger, fpexc, regs);
+bounce:		regs->ARM_pc += 4;
+		VFP_bounce(trigger, fpexc, regs);
 	}

 	local_bh_enable();
 	return 0;
 }

-#ifdef CONFIG_KERNEL_MODE_NEON
-
-static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
-{
-	/*
-	 * If we reach this point, a floating point exception has been raised
-	 * while running in kernel mode. If the NEON/VFP unit was enabled at the
-	 * time, it means a VFP instruction has been issued that requires
-	 * software assistance to complete, something which is not currently
-	 * supported in kernel mode.
-	 * If the NEON/VFP unit was disabled, and the location pointed to below
-	 * is properly preceded by a call to kernel_neon_begin(), something has
-	 * caused the task to be scheduled out and back in again. In this case,
-	 * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
-	 * be helpful in localizing the problem.
-	 */
-	if (fmrx(FPEXC) & FPEXC_EN)
-		pr_crit("BUG: unsupported FP instruction in kernel mode\n");
-	else
-		pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
-	pr_crit("FPEXC == 0x%08x\n", fmrx(FPEXC));
-	return 1;
-}
-
-static struct undef_hook vfp_kmode_exception_hook[] = {{
+static struct undef_hook neon_support_hook[] = {{
 	.instr_mask	= 0xfe000000,
 	.instr_val	= 0xf2000000,
-	.cpsr_mask	= MODE_MASK | PSR_T_BIT,
-	.cpsr_val	= SVC_MODE,
-	.fn		= vfp_kmode_exception,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= 0,
+	.fn		= vfp_support_entry,
 }, {
 	.instr_mask	= 0xff100000,
 	.instr_val	= 0xf4000000,
-	.cpsr_mask	= MODE_MASK | PSR_T_BIT,
-	.cpsr_val	= SVC_MODE,
-	.fn		= vfp_kmode_exception,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= 0,
+	.fn		= vfp_support_entry,
 }, {
 	.instr_mask	= 0xef000000,
 	.instr_val	= 0xef000000,
-	.cpsr_mask	= MODE_MASK | PSR_T_BIT,
-	.cpsr_val	= SVC_MODE | PSR_T_BIT,
-	.fn		= vfp_kmode_exception,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= PSR_T_BIT,
+	.fn		= vfp_support_entry,
 }, {
 	.instr_mask	= 0xff100000,
 	.instr_val	= 0xf9000000,
-	.cpsr_mask	= MODE_MASK | PSR_T_BIT,
-	.cpsr_val	= SVC_MODE | PSR_T_BIT,
-	.fn		= vfp_kmode_exception,
-}, {
-	.instr_mask	= 0x0c000e00,
-	.instr_val	= 0x0c000a00,
-	.cpsr_mask	= MODE_MASK,
-	.cpsr_val	= SVC_MODE,
-	.fn		= vfp_kmode_exception,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= PSR_T_BIT,
+	.fn		= vfp_support_entry,
 }};

-static int __init vfp_kmode_exception_hook_init(void)
-{
-	int i;
+static struct undef_hook vfp_support_hook = {
+	.instr_mask	= 0x0c000e00,
+	.instr_val	= 0x0c000a00,
+	.fn		= vfp_support_entry,
+};

-	for (i = 0; i < ARRAY_SIZE(vfp_kmode_exception_hook); i++)
-		register_undef_hook(&vfp_kmode_exception_hook[i]);
-	return 0;
-}
-subsys_initcall(vfp_kmode_exception_hook_init);
+#ifdef CONFIG_KERNEL_MODE_NEON

 /*
  * Kernel-side NEON support functions
@@ -919,8 +912,11 @@ static int __init vfp_init(void)
 	 * for NEON if the hardware has the MVFR registers.
 	 */
 	if (IS_ENABLED(CONFIG_NEON) &&
-	    (fmrx(MVFR1) & 0x000fff00) == 0x00011100)
+	    (fmrx(MVFR1) & 0x000fff00) == 0x00011100) {
 		elf_hwcap |= HWCAP_NEON;
+		for (int i = 0; i < ARRAY_SIZE(neon_support_hook); i++)
+			register_undef_hook(&neon_support_hook[i]);
+	}

 	if (IS_ENABLED(CONFIG_VFPv3)) {
 		u32 mvfr0 = fmrx(MVFR0);
@@ -989,6 +985,7 @@ static int __init vfp_init(void)

 	have_vfp = true;

+	register_undef_hook(&vfp_support_hook);
 	thread_register_notifier(&vfp_notifier_block);
 	vfp_pm_init();

--
cgit

From 8bcba70cb5c2204a011e06278a1fbfb1213e1df1 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Sun, 19 Mar 2023 15:18:25 +0100
Subject: ARM: entry: Disregard Thumb undef exception in coproc dispatch

Now that the only remaining coprocessor instructions being handled via
the dispatch in entry-armv.S are ones that only exist in an ARM (A32)
encoding, we can simplify the handling of Thumb undef exceptions, and
send them straight to the undefined instruction handlers in C code.

This also means we can drop the code that partially decodes the
instruction to decide whether it is a 16-bit or 32-bit Thumb
instruction: this is all taken care of by the undef hook.
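
To illustrate how an undef hook selects one instruction set or the
other, here is a minimal sketch of a hook that only matches Thumb
encodings; the mask/value pair is taken from the Thumb NEON entry of
neon_support_hook in the previous patch, and the handler name is
hypothetical:

	static int thumb_neon_handler(struct pt_regs *regs, unsigned int instr)
	{
		return 0;	/* 0 means handled; nonzero falls back to SIGILL */
	}

	static struct undef_hook thumb_neon_hook = {
		.instr_mask	= 0xef000000,
		.instr_val	= 0xef000000,
		.cpsr_mask	= PSR_T_BIT,	/* only match when the T bit is set */
		.cpsr_val	= PSR_T_BIT,
		.fn		= thumb_neon_handler,
	};

	register_undef_hook(&thumb_neon_hook);
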
Acked-by: Linus Walleij
Signed-off-by: Ard Biesheuvel
---
 arch/arm/kernel/entry-armv.S | 121 +++++++------------------------------------
 1 file changed, 18 insertions(+), 103 deletions(-)

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0e40b2566f59..aff6cfe58745 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -446,106 +446,32 @@ ENDPROC(__irq_usr)
 __und_usr:
 	usr_entry uaccess=0

-	mov	r2, r4
-	mov	r3, r5
-
-	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
-	@      faulting instruction depending on Thumb mode.
-	@ r3 = regs->ARM_cpsr
-	@
-	@ The emulation code returns using r9 if it has emulated the
-	@ instruction, or the more conventional lr if we are to treat
-	@ this as a real undefined instruction
-	@
-	badr	r9, ret_from_exception
-
 	@ IRQs must be enabled before attempting to read the instruction from
 	@ user space since that could cause a page/translation fault if the
 	@ page table was modified by another CPU.
 	enable_irq

-	tst	r3, #PSR_T_BIT			@ Thumb mode?
-	bne	__und_usr_thumb
-	sub	r4, r2, #4			@ ARM instr at LR - 4
-1:	ldrt	r0, [r4]
-ARM_BE8(rev	r0, r0)				@ little endian instruction
-
+	tst	r5, #PSR_T_BIT			@ Thumb mode?
+	mov	r1, #2				@ set insn size to 2 for Thumb
+	bne	0f				@ handle as Thumb undef exception
+	adr	r9, ret_from_exception
+	bl	call_fpe			@ returns via R9 on success
+	mov	r1, #4				@ set insn size to 4 for ARM
+0:	mov	r0, sp
 	uaccess_disable ip
-
-	@ r0 = 32-bit ARM instruction which caused the exception
-	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
-	@ r4 = PC value for the faulting instruction
-	@ lr = 32-bit undefined instruction function
-	badr	lr, __und_usr_fault_32
-	b	call_fpe
-
-__und_usr_thumb:
-	@ Thumb instruction
-	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
-#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
-/*
- * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
- * can never be supported in a single kernel, this code is not applicable at
- * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
- * made about .arch directives.
- */
-#if __LINUX_ARM_ARCH__ < 7
-/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
-	ldr_va	r5, cpu_architecture
-	cmp	r5, #CPU_ARCH_ARMv7
-	blo	__und_usr_fault_16		@ 16bit undefined instruction
-/*
- * The following code won't get run unless the running CPU really is v7, so
- * coding round the lack of ldrht on older arches is pointless.  Temporarily
- * override the assembler target arch with the minimum required instead:
- */
-	.arch	armv6t2
-#endif
-2:	ldrht	r5, [r4]
-ARM_BE8(rev16	r5, r5)				@ little endian instruction
-	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
-	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
-3:	ldrht	r0, [r2]
-ARM_BE8(rev16	r0, r0)				@ little endian instruction
-	uaccess_disable ip
-	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
-	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
-	orr	r0, r0, r5, lsl #16
-	badr	lr, __und_usr_fault_32
-	@ r0 = the two 16-bit Thumb instructions which caused the exception
-	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
-	@ r4 = PC value for the first 16-bit Thumb instruction
-	@ lr = 32bit undefined instruction function
-
-#if __LINUX_ARM_ARCH__ < 7
-/* If the target arch was overridden, change it back: */
-#ifdef CONFIG_CPU_32v6K
-	.arch	armv6k
-#else
-	.arch	armv6
-#endif
-#endif /* __LINUX_ARM_ARCH__ < 7 */
-#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
-	b	__und_usr_fault_16
-#endif
+	bl	__und_fault
+	b	ret_from_exception
 UNWIND(.fnend)
 ENDPROC(__und_usr)

 /*
- * The out of line fixup for the ldrt instructions above.
+ * The out of line fixup for the ldrt instruction below.
  */
 	.pushsection .text.fixup, "ax"
 	.align	2
 4:	str	r4, [sp, #S_PC]			@ retry current instruction
 	ret	r9
 	.popsection
-	.pushsection __ex_table,"a"
-	.long	1b, 4b
-#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
-	.long	2b, 4b
-	.long	3b, 4b
-#endif
-	.popsection

 /*
  * Check whether the instruction is a co-processor instruction.
@@ -558,20 +484,22 @@ ENDPROC(__und_usr)
  * for the ARM6/ARM7 SWI bug.
  *
  * Emulators may wish to make use of the following registers:
- *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
- *  r2  = PC value to resume execution after successful emulation
+ *  r4  = PC value to resume execution after successful emulation
  *  r9  = normal "successful" return address
  *  r10 = this threads thread_info structure
  *  lr  = unrecognised instruction return address
  * IRQs enabled, FIQs enabled.
  */
- @
- @ Fall-through from Thumb-2 __und_usr
- @
 call_fpe:
+	mov	r2, r4
+	sub	r4, r4, #4			@ ARM instruction at user PC - 4
+USERL(	4b,	ldrt r0, [r4])			@ load opcode from user space
+ARM_BE8(rev	r0, r0)				@ little endian instruction
+
+	uaccess_disable ip
+
+	get_thread_info r10			@ get current thread
 	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
-	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
 	reteq	lr
 	and	r8, r0, #0x00000f00		@ mask out CP number
 #ifdef CONFIG_IWMMXT
 	@ Test if we need to give access to iWMMXt coprocessors
 	ldr	r5, [r10, #TI_FLAGS]
--
cgit

From 303d6da167dcbc3dd89adf3ca4e36c369950ed01 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 20 Mar 2023 00:07:20 +0100
Subject: ARM: iwmmxt: Use undef hook to enable coprocessor for task

Define an undef hook to deal with undef exceptions triggered by iwmmxt
instructions that were issued with the coprocessor disabled. This
removes the dependency on the coprocessor dispatch code in
entry-armv.S, which will be made NWFPE-only in a subsequent patch.
Reviewed-by: Linus Walleij
Signed-off-by: Ard Biesheuvel
---
 arch/arm/include/asm/thread_info.h | 16 ++++++++++++++++
 arch/arm/kernel/entry-armv.S       |  1 +
 arch/arm/kernel/iwmmxt.S           | 18 ++++++++++++++----
 arch/arm/kernel/pj4-cp0.c          |  1 +
 arch/arm/kernel/xscale-cp0.c       |  1 +
 5 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 85c5f1e02ebf..943ffcf069d2 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -40,6 +40,7 @@ struct task_struct;
 DECLARE_PER_CPU(struct task_struct *, __entry_task);

 #include
+#include

 struct cpu_context_save {
 	__u32	r4;
@@ -104,6 +105,21 @@ extern void iwmmxt_task_restore(struct thread_info *, void *);
 extern void iwmmxt_task_release(struct thread_info *);
 extern void iwmmxt_task_switch(struct thread_info *);

+extern int iwmmxt_undef_handler(struct pt_regs *, u32);
+
+static inline void register_iwmmxt_undef_handler(void)
+{
+	static struct undef_hook iwmmxt_undef_hook = {
+		.instr_mask	= 0x0c000e00,
+		.instr_val	= 0x0c000000,
+		.cpsr_mask	= MODE_MASK | PSR_T_BIT,
+		.cpsr_val	= USR_MODE,
+		.fn		= iwmmxt_undef_handler,
+	};
+
+	register_undef_hook(&iwmmxt_undef_hook);
+}
+
 extern void vfp_sync_hwstate(struct thread_info *);
 extern void vfp_flush_hwstate(struct thread_info *);

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index aff6cfe58745..822b2c83bf08 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -507,6 +507,7 @@ ARM_BE8(rev	r0, r0)				@ little endian instruction
 	ldr	r5, [r10, #TI_FLAGS]
 	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
 	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
+	movcs	r0, sp				@ pass struct pt_regs
 	bcs	iwmmxt_task_enable
 #endif
 	ARM(	add	pc, pc, r8, lsr #6	)
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index d2b4ac06e4ed..a0218c4867b9 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -58,9 +58,19 @@
 	.text
 	.arm

+ENTRY(iwmmxt_undef_handler)
+	push	{r9, r10, lr}
+	get_thread_info r10
+	mov	r9, pc
+	b	iwmmxt_task_enable
+	mov	r0, #0
+	pop	{r9, r10, pc}
+ENDPROC(iwmmxt_undef_handler)
+
 /*
  * Lazy switching of Concan coprocessor context
  *
+ * r0  = struct pt_regs pointer
  * r10 = struct thread_info pointer
  * r9  = ret_from_exception
  * lr  = undefined instr exit
@@ -84,12 +94,12 @@ ENTRY(iwmmxt_task_enable)
 	PJ4(mcr	p15, 0, r2, c1, c0, 2)

 	ldr	r3, =concan_owner
-	add	r0, r10, #TI_IWMMXT_STATE	@ get task Concan save area
-	ldr	r2, [sp, #60]			@ current task pc value
+	ldr	r2, [r0, #S_PC]			@ current task pc value
 	ldr	r1, [r3]			@ get current Concan owner
-	str	r0, [r3]			@ this task now owns Concan regs
 	sub	r2, r2, #4			@ adjust pc back
-	str	r2, [sp, #60]
+	str	r2, [r0, #S_PC]
+	add	r0, r10, #TI_IWMMXT_STATE	@ get task Concan save area
+	str	r0, [r3]			@ this task now owns Concan regs

 	mrc	p15, 0, r2, c2, c0, 0
 	mov	r2, r2				@ cpwait
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
index 1d1fb22f44f3..4bca8098c4ff 100644
--- a/arch/arm/kernel/pj4-cp0.c
+++ b/arch/arm/kernel/pj4-cp0.c
@@ -126,6 +126,7 @@ static int __init pj4_cp0_init(void)
 	pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
 	elf_hwcap |= HWCAP_IWMMXT;
 	thread_register_notifier(&iwmmxt_notifier_block);
+	register_iwmmxt_undef_handler();
 #endif

 	return 0;
diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c
index ed4f6e77616d..00d00d3aae97 100644
--- a/arch/arm/kernel/xscale-cp0.c
+++ b/arch/arm/kernel/xscale-cp0.c
@@ -166,6 +166,7 @@ static int __init xscale_cp0_init(void)
 		pr_info("XScale iWMMXt coprocessor detected.\n");
 		elf_hwcap |= HWCAP_IWMMXT;
 		thread_register_notifier(&iwmmxt_notifier_block);
+		register_iwmmxt_undef_handler();
 #endif
 	} else {
 		pr_info("XScale DSP coprocessor detected.\n");
--
cgit

From 47ba5f39eab3c2a9a1ba878159a6050f2bbfc0e2 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 20 Mar 2023 00:25:18 +0100
Subject: ARM: entry: Make asm coproc dispatch code NWFPE only

Now that we can dispatch all VFP and iWMMXT related undef exceptions
using undef hooks implemented in C code, we no longer need the asm
entry code that takes care of this unless we are using FPE, so we can
move it into the FPE entry code.

As this means it is ARM only, we can remove the Thumb2 specific
decorations as well.

It also means the non-standard, asm-only calling convention where
returning via LR means failure and returning via R9 means success is
now only used on legacy platforms that lack any kind of function
return prediction, avoiding the associated performance impact.

Reviewed-by: Linus Walleij
Signed-off-by: Ard Biesheuvel
---
 arch/arm/kernel/entry-armv.S | 93 +-------------------------------------------
 arch/arm/nwfpe/entry.S       | 77 ++++++++++++++++++++++++++++++++++++
 2 files changed, 79 insertions(+), 91 deletions(-)

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 822b2c83bf08..682e92664b07 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -454,8 +454,10 @@ __und_usr:
 	tst	r5, #PSR_T_BIT			@ Thumb mode?
 	mov	r1, #2				@ set insn size to 2 for Thumb
 	bne	0f				@ handle as Thumb undef exception
+#ifdef CONFIG_FPE_NWFPE
 	adr	r9, ret_from_exception
 	bl	call_fpe			@ returns via R9 on success
+#endif
 	mov	r1, #4				@ set insn size to 4 for ARM
 0:	mov	r0, sp
 	uaccess_disable ip
@@ -464,97 +466,6 @@ __und_usr:
 UNWIND(.fnend)
 ENDPROC(__und_usr)

-/*
- * The out of line fixup for the ldrt instruction below.
- */
-	.pushsection .text.fixup, "ax"
-	.align	2
-4:	str	r4, [sp, #S_PC]			@ retry current instruction
-	ret	r9
-	.popsection
-
-/*
- * Check whether the instruction is a co-processor instruction.
- * If yes, we need to call the relevant co-processor handler.
- *
- * Note that we don't do a full check here for the co-processor
- * instructions; all instructions with bit 27 set are well
- * defined.  The only instructions that should fault are the
- * co-processor instructions.  However, we have to watch out
- * for the ARM6/ARM7 SWI bug.
- *
- * Emulators may wish to make use of the following registers:
- *  r4  = PC value to resume execution after successful emulation
- *  r9  = normal "successful" return address
- *  r10 = this threads thread_info structure
- *  lr  = unrecognised instruction return address
- * IRQs enabled, FIQs enabled.
- */
-call_fpe:
-	mov	r2, r4
-	sub	r4, r4, #4			@ ARM instruction at user PC - 4
-USERL(	4b,	ldrt r0, [r4])			@ load opcode from user space
-ARM_BE8(rev	r0, r0)				@ little endian instruction
-
-	uaccess_disable ip
-
-	get_thread_info r10			@ get current thread
-	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
-	reteq	lr
-	and	r8, r0, #0x00000f00		@ mask out CP number
-#ifdef CONFIG_IWMMXT
-	@ Test if we need to give access to iWMMXt coprocessors
-	ldr	r5, [r10, #TI_FLAGS]
-	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
-	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
-	movcs	r0, sp				@ pass struct pt_regs
-	bcs	iwmmxt_task_enable
-#endif
-	ARM(	add	pc, pc, r8, lsr #6	)
-	THUMB(	lsr	r8, r8, #6		)
-	THUMB(	add	pc, r8			)
-	nop
-
-	ret.w	lr				@ CP#0
-	W(b)	do_fpe				@ CP#1 (FPE)
-	W(b)	do_fpe				@ CP#2 (FPE)
-	ret.w	lr				@ CP#3
-	ret.w	lr				@ CP#4
-	ret.w	lr				@ CP#5
-	ret.w	lr				@ CP#6
-	ret.w	lr				@ CP#7
-	ret.w	lr				@ CP#8
-	ret.w	lr				@ CP#9
-	ret.w	lr				@ CP#10 (VFP)
-	ret.w	lr				@ CP#11 (VFP)
-	ret.w	lr				@ CP#12
-	ret.w	lr				@ CP#13
-	ret.w	lr				@ CP#14 (Debug)
-	ret.w	lr				@ CP#15 (Control)
-
-do_fpe:
-	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
-	ldr_va	pc, fp_enter, tmp=r4		@ Call FP module USR entry point
-
-/*
- * The FP module is called with these registers set:
- *  r0  = instruction
- *  r2  = PC+4
- *  r9  = normal "successful" return address
- *  r10 = FP workspace
- *  lr  = unrecognised FP instruction return address
- */
-
-	.pushsection .data
-	.align	2
-ENTRY(fp_enter)
-	.word	no_fp
-	.popsection
-
-ENTRY(no_fp)
-	ret	lr
-ENDPROC(no_fp)
-
 	.align	5
 __pabt_usr:
 	usr_entry
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index d8f9915566e1..354d297a193b 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -7,6 +7,7 @@
     Direct questions, comments to Scott Bambrough
 */

+#include
 #include
 #include

@@ -104,6 +105,7 @@ next:
 	@ plain LDR instruction.  Weird, but it seems harmless.
 	.pushsection .text.fixup,"ax"
 	.align	2
+.Lrep:	str	r4, [sp, #S_PC]		@ retry current instruction
 .Lfix:	ret	r9			@ let the user eat segfaults
 	.popsection

@@ -111,3 +113,78 @@ next:
 	.align	3
 	.long	.Lx1, .Lfix
 	.popsection
+
+	@
+	@ Check whether the instruction is a co-processor instruction.
+	@ If yes, we need to call the relevant co-processor handler.
+	@ Only FPE instructions are dispatched here, everything else
+	@ is handled by undef hooks.
+	@
+	@ Emulators may wish to make use of the following registers:
+	@  r4  = PC value to resume execution after successful emulation
+	@  r9  = normal "successful" return address
+	@  lr  = unrecognised instruction return address
+	@  IRQs enabled, FIQs enabled.
+	@
+ENTRY(call_fpe)
+	mov	r2, r4
+	sub	r4, r4, #4			@ ARM instruction at user PC - 4
+USERL(	.Lrep,	ldrt r0, [r4])			@ load opcode from user space
+ARM_BE8(rev	r0, r0)				@ little endian instruction
+
+	uaccess_disable ip
+
+	get_thread_info r10			@ get current thread
+	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
+	reteq	lr
+	and	r8, r0, #0x00000f00		@ mask out CP number
+#ifdef CONFIG_IWMMXT
+	@ Test if we need to give access to iWMMXt coprocessors
+	ldr	r5, [r10, #TI_FLAGS]
+	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
+	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
+	movcs	r0, sp				@ pass struct pt_regs
+	bcs	iwmmxt_task_enable
+#endif
+	add	pc, pc, r8, lsr #6
+	nop
+
+	ret	lr				@ CP#0
+	b	do_fpe				@ CP#1 (FPE)
+	b	do_fpe				@ CP#2 (FPE)
+	ret	lr				@ CP#3
+	ret	lr				@ CP#4
+	ret	lr				@ CP#5
+	ret	lr				@ CP#6
+	ret	lr				@ CP#7
+	ret	lr				@ CP#8
+	ret	lr				@ CP#9
+	ret	lr				@ CP#10 (VFP)
+	ret	lr				@ CP#11 (VFP)
+	ret	lr				@ CP#12
+	ret	lr				@ CP#13
+	ret	lr				@ CP#14 (Debug)
+	ret	lr				@ CP#15 (Control)
+
+do_fpe:
+	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
+	ldr_va	pc, fp_enter, tmp=r4		@ Call FP module USR entry point
+
+	@
+	@ The FP module is called with these registers set:
+	@  r0  = instruction
+	@  r2  = PC+4
+	@  r9  = normal "successful" return address
+	@  r10 = FP workspace
+	@  lr  = unrecognised FP instruction return address
+	@
+
+	.pushsection .data
+	.align	2
+ENTRY(fp_enter)
+	.word	no_fp
+	.popsection
+
+no_fp:
+	ret	lr
+ENDPROC(no_fp)
--
cgit

From e6b51532d5273eeefba84106daea3d392c602837 Mon Sep 17 00:00:00 2001
From: Tomislav Novak
Date: Tue, 20 Jun 2023 18:54:11 +0100
Subject: ARM: 9316/1: hw_breakpoint: fix single-stepping when using bpf_overflow_handler

Arm platforms use is_default_overflow_handler() to determine if the
hw_breakpoint code should single-step over the breakpoint trigger or
let the custom handler deal with it.

Since bpf_overflow_handler() currently isn't recognized as a default
handler, attaching a BPF program to a PERF_TYPE_BREAKPOINT event causes
it to keep firing (the instruction triggering the data abort exception
is never skipped). For example:

  # bpftrace -e 'watchpoint:0x10000:4:w { print("hit") }' -c ./test
  Attaching 1 probe...
  hit
  hit
  [...]
  ^C

(./test performs a single 4-byte store to 0x10000)

This patch replaces the check with uses_default_overflow_handler(),
which accounts for the bpf_overflow_handler() case by also testing if
one of the perf_event_output functions gets invoked indirectly, via
orig_overflow_handler.

Link: https://lore.kernel.org/linux-arm-kernel/20220923203644.2731604-1-tnovak@fb.com/

Signed-off-by: Tomislav Novak
Tested-by: Samuel Gosselin # arm64
Reviewed-by: Catalin Marinas
Acked-by: Alexei Starovoitov
Signed-off-by: Russell King (Oracle)
---
 arch/arm/kernel/hw_breakpoint.c   |  8 ++++----
 arch/arm64/kernel/hw_breakpoint.c |  4 ++--
 include/linux/perf_event.h        | 22 +++++++++++++++++++---
 3 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 054e9199f30d..dc0fb7a81371 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -626,7 +626,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
 	hw->address &= ~alignment_mask;
 	hw->ctrl.len <<= offset;

-	if (is_default_overflow_handler(bp)) {
+	if (uses_default_overflow_handler(bp)) {
 		/*
 		 * Mismatch breakpoints are required for single-stepping
 		 * breakpoints.
@@ -798,7 +798,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 	 * Otherwise, insert a temporary mismatch breakpoint so that
 	 * we can single-step over the watchpoint trigger.
 	 */
-	if (!is_default_overflow_handler(wp))
+	if (!uses_default_overflow_handler(wp))
 		continue;
 step:
 	enable_single_step(wp, instruction_pointer(regs));
@@ -811,7 +811,7 @@ step:
 	info->trigger = addr;
 	pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
 	perf_bp_event(wp, regs);
-	if (is_default_overflow_handler(wp))
+	if (uses_default_overflow_handler(wp))
 		enable_single_step(wp, instruction_pointer(regs));
 }

@@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 	info->trigger = addr;
 	pr_debug("breakpoint fired: address = 0x%x\n", addr);
 	perf_bp_event(bp, regs);
-	if (is_default_overflow_handler(bp))
+	if (uses_default_overflow_handler(bp))
 		enable_single_step(bp, addr);
 	goto unlock;
 }
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index db2a1861bb97..35225632d70a 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -654,7 +654,7 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,
 	perf_bp_event(bp, regs);

 	/* Do we need to handle the stepping? */
-	if (is_default_overflow_handler(bp))
+	if (uses_default_overflow_handler(bp))
 		step = 1;
 unlock:
 	rcu_read_unlock();
@@ -733,7 +733,7 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
 static int watchpoint_report(struct perf_event *wp, unsigned long addr,
 			     struct pt_regs *regs)
 {
-	int step = is_default_overflow_handler(wp);
+	int step = uses_default_overflow_handler(wp);
 	struct arch_hw_breakpoint *info = counter_arch_bp(wp);

 	info->trigger = addr;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2166a69e3bf2..e657916c9509 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1316,15 +1316,31 @@ extern int perf_event_output(struct perf_event *event,
 			     struct pt_regs *regs);

 static inline bool
-is_default_overflow_handler(struct perf_event *event)
+__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
 {
-	if (likely(event->overflow_handler == perf_event_output_forward))
+	if (likely(overflow_handler == perf_event_output_forward))
 		return true;
-	if (unlikely(event->overflow_handler == perf_event_output_backward))
+	if (unlikely(overflow_handler == perf_event_output_backward))
 		return true;
 	return false;
 }

+#define is_default_overflow_handler(event) \
+	__is_default_overflow_handler((event)->overflow_handler)
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline bool uses_default_overflow_handler(struct perf_event *event)
+{
+	if (likely(is_default_overflow_handler(event)))
+		return true;
+
+	return __is_default_overflow_handler(event->orig_overflow_handler);
+}
+#else
+#define uses_default_overflow_handler(event) \
+	is_default_overflow_handler(event)
+#endif
+
 extern void
 perf_event_header__init_id(struct perf_event_header *header,
 			   struct perf_sample_data *data,
--
cgit

From 8922ba71c969d2a0c01a94372a71477d879470de Mon Sep 17 00:00:00 2001
From: Mårten Lindahl
Date: Tue, 8 Aug 2023 09:37:32 +0100
Subject: ARM: 9317/1: kexec: Make smp stop calls asynchronous
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

If a panic is triggered by a hrtimer interrupt all online cpus will be
notified and set offline. But as highlighted by commit 19dbdcb8039c
("smp: Warn on function calls from softirq context") this call should
not be made synchronous with disabled interrupts:

  softdog: Initiating panic
  Kernel panic - not syncing: Software Watchdog Timer expired
  WARNING: CPU: 1 PID: 0 at kernel/smp.c:753 smp_call_function_many_cond
  unwind_backtrace:
    show_stack
    dump_stack_lvl
    __warn
    warn_slowpath_fmt
    smp_call_function_many_cond
    smp_call_function
    crash_smp_send_stop.part.0
    machine_crash_shutdown
    __crash_kexec
    panic
    softdog_fire
    __hrtimer_run_queues
    hrtimer_interrupt

Make the smp call for machine_crash_nonpanic_core() asynchronous.

Signed-off-by: Mårten Lindahl
Signed-off-by: Russell King (Oracle)
---
 arch/arm/kernel/machine_kexec.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 46364b699cc3..5d07cf9e0044 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -94,16 +94,28 @@ static void machine_crash_nonpanic_core(void *unused)
 	}
 }

+static DEFINE_PER_CPU(call_single_data_t, cpu_stop_csd) =
+	CSD_INIT(machine_crash_nonpanic_core, NULL);
+
 void crash_smp_send_stop(void)
 {
 	static int cpus_stopped;
 	unsigned long msecs;
+	call_single_data_t *csd;
+	int cpu, this_cpu = raw_smp_processor_id();

 	if (cpus_stopped)
 		return;

 	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
-	smp_call_function(machine_crash_nonpanic_core, NULL, false);
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+
+		csd = &per_cpu(cpu_stop_csd, cpu);
+		smp_call_function_single_async(cpu, csd);
+	}
+
 	msecs = 1000; /* Wait at most a second for the other cpus to stop */
 	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
 		mdelay(1);
--
cgit

From b0a6da43a510fdfff23fcda12f90fba37cab1c05 Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Wed, 9 Aug 2023 00:44:06 +0100
Subject: ARM: 9318/1: locomo: move kernel-doc to prevent warnings

Move the kernel-doc comments for locomo_probe() to immediately before
that function instead of before __locomo_probe() to prevent kernel-doc
warnings. Adjust the documented function parameters and make the
return values compatible with ReST by adding bullets to them. Add more
possible return values to the list.

Prevents these warnings:

arch/arm/common/locomo.c:368: warning: Function parameter or member 'me' not described in '__locomo_probe'
arch/arm/common/locomo.c:368: warning: Function parameter or member 'mem' not described in '__locomo_probe'
arch/arm/common/locomo.c:368: warning: Function parameter or member 'irq' not described in '__locomo_probe'
arch/arm/common/locomo.c:368: warning: expecting prototype for locomo_probe(). Prototype was for __locomo_probe() instead

Link: lore.kernel.org/r/202308050851.zAvHe6y7-lkp@intel.com
Fixes: 5eb6e280432d ("ARM: 9289/1: Allow pre-ARMv5 builds with ld.lld 16.0.0 and newer")
Signed-off-by: Randy Dunlap
Reported-by: kernel test robot
Cc: Nick Desaulniers
Cc: Arnd Bergmann
Cc: Nathan Chancellor
Cc: linux-arm-kernel@lists.infradead.org
Cc: patches@armlinux.org.uk
Signed-off-by: Russell King (Oracle)
---
 arch/arm/common/locomo.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 309b74783468..70480dd9e96d 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -350,19 +350,6 @@ static int locomo_resume(struct platform_device *dev)
 }
 #endif

-
-/**
- * locomo_probe - probe for a single LoCoMo chip.
- * @phys_addr: physical address of device.
- *
- * Probe for a LoCoMo chip.  This must be called
- * before any other locomo-specific code.
- *
- * Returns:
- * %-ENODEV device not found.
- * %-EBUSY physical address already marked in-use.
- * %0 successful.
- */
 static int
 __locomo_probe(struct device *me, struct resource *mem, int irq)
 {
@@ -479,6 +466,21 @@ static void __locomo_remove(struct locomo *lchip)
 	kfree(lchip);
 }

+/**
+ * locomo_probe - probe for a single LoCoMo chip.
+ * @dev: platform device
+ *
+ * Probe for a LoCoMo chip.  This must be called
+ * before any other locomo-specific code.
+ *
+ * Returns:
+ * * %-EINVAL - device's IORESOURCE_MEM not found
+ * * %-ENXIO - could not allocate an IRQ for the device
+ * * %-ENODEV - device not found.
+ * * %-EBUSY - physical address already marked in-use.
+ * * %-ENOMEM - could not allocate or iomap memory.
+ * * %0 - successful.
+ */
 static int locomo_probe(struct platform_device *dev)
 {
 	struct resource *mem;
--
cgit

From 133789d4a458c761f60ef71cc5a6ede5a617ed7e Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)"
Date: Mon, 14 Aug 2023 23:22:51 +0100
Subject: Revert part of ae1f8d793a19 ("ARM: 9304/1: add prototype for function
 called only from asm")

The build bot reports:

>> arch/arm/vfp/vfpmodule.c:324:13: error: static declaration of 'VFP_bounce' follows non-static declaration
   static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
               ^
   arch/arm/include/asm/vfp.h:105:6: note: previous declaration is here
   void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs);
        ^
   1 error generated.

This is due to a merge conflict between commit ae1f8d793a19 ("ARM:
9304/1: add prototype for function called only from asm") and Ard's
commit 4708fb041346 ("ARM: vfp: Reimplement VFP exception entry in C
code").

Fix this by removing Arnd's change. No need to backport.

Reported-by: kernel test robot
Closes: https://lore.kernel.org/oe-kbuild-all/202308150547.m54XHV12-lkp@intel.com/
Signed-off-by: Russell King (Oracle)
---
 arch/arm/include/asm/vfp.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index 5b57b8768bac..157ea3426158 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -102,7 +102,6 @@
 #ifndef __ASSEMBLY__
 void vfp_disable(void);
-void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs);
 #endif

 #endif /* __ASM_VFP_H */
--
cgit