author     Russell King (Oracle) <rmk+kernel@armlinux.org.uk>  2023-06-27 14:17:19 +0100
committer  Russell King (Oracle) <rmk+kernel@armlinux.org.uk>  2023-06-27 14:17:19 +0100
commit     53ae158f6ddc14df5c44d62c06e33fdb66de1196 (patch)
tree       d0d0485f5f614e0070bab6ff1b53d56aec7c2d8e /arch/arm/vfp
parent     ac9a78681b921877518763ba0e89202254349d1b (diff)
parent     47ba5f39eab3c2a9a1ba878159a6050f2bbfc0e2 (diff)
Merge tag 'arm-vfp-refactor-for-rmk' of git://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux into devel-stable

Refactor VFP support code and reimplement in C

The VFP-related changes to permit kernel mode NEON in softirq context resulted in some issues regarding en/disabling of softirqs from asm code, and this made it clear that it would be better to handle more of it from C code.

Given that we already have infrastructure that associates undefined instruction exceptions with handler code based on value/mask pairs, we can easily move the dispatch of VFP and NEON instructions to C code, once we reimplement the actual VFP support routine (which reasons about how to deal with the exception and whether any emulation is needed) in C code first.

With those out of the way, we can drop the partial decoding logic in asm that reasons about which ISA is being used by user space, as the remaining cases are all 32-bit ARM only. This leaves an FPE-specific routine with some iWMMXT logic that is easily duplicated in C as well, allowing us to move the FPE asm code into the FPE asm source file, and out of the shared entry code.
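The "value/mask pairs" mentioned above refer to the ARM undefined-instruction hook infrastructure, which the patch below reuses for VFP/NEON dispatch (see the neon_support_hook and vfp_support_hook hunks in vfpmodule.c). As orientation, here is a minimal sketch of how such a hook is declared and registered; the field names come from asm/traps.h and the opcode pattern from this patch, while my_vfp_handler and my_vfp_hook are hypothetical names, not part of the patch:

    #include <asm/traps.h>      /* struct undef_hook, register_undef_hook() */
    #include <asm/ptrace.h>     /* struct pt_regs */

    /*
     * Hypothetical handler: called when an undefined-instruction trap
     * matches the value/mask pair below. Return 0 if the instruction
     * was handled; nonzero lets the next hook (or the default SIGILL
     * path) take over.
     */
    static int my_vfp_handler(struct pt_regs *regs, unsigned int instr)
    {
            return 1;
    }

    static struct undef_hook my_vfp_hook = {
            .instr_mask = 0x0c000e00,  /* which opcode bits must match... */
            .instr_val  = 0x0c000a00,  /* ...this pattern (VFP coproc 10/11) */
            .fn         = my_vfp_handler,
    };

    /*
     * Registered once during init, e.g. from an initcall:
     *         register_undef_hook(&my_vfp_hook);
     */

Handlers registered this way are tried in turn against the trapped opcode, which is what lets the patch replace the asm dispatch in entry.S with C functions hooked on the VFP and NEON opcode spaces.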
Diffstat (limited to 'arch/arm/vfp')
-rw-r--r--  arch/arm/vfp/Makefile     |   2
-rw-r--r--  arch/arm/vfp/entry.S      |  28
-rw-r--r--  arch/arm/vfp/vfp.h        |   1
-rw-r--r--  arch/arm/vfp/vfphw.S      | 206
-rw-r--r--  arch/arm/vfp/vfpmodule.c  | 208
5 files changed, 162 insertions, 283 deletions
diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile
index 749901a72d6d..dfd64bc2b2fb 100644
--- a/arch/arm/vfp/Makefile
+++ b/arch/arm/vfp/Makefile
@@ -8,4 +8,4 @@
# ccflags-y := -DDEBUG
# asflags-y := -DDEBUG
-obj-y += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o
+obj-y += vfpmodule.o vfphw.o vfpsingle.o vfpdouble.o
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
deleted file mode 100644
index 7483ef8bccda..000000000000
--- a/arch/arm/vfp/entry.S
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/arch/arm/vfp/entry.S
- *
- * Copyright (C) 2004 ARM Limited.
- * Written by Deep Blue Solutions Limited.
- */
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/thread_info.h>
-#include <asm/vfpmacros.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-
-@ VFP entry point.
-@
-@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
-@ r2 = PC value to resume execution after successful emulation
-@ r9 = normal "successful" return address
-@ r10 = this threads thread_info structure
-@ lr = unrecognised instruction return address
-@ IRQs enabled.
-@
-ENTRY(do_vfp)
- mov r1, r10
- mov r3, r9
- b vfp_entry
-ENDPROC(do_vfp)
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index 5cd6d5053271..e43a630f8a16 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -375,3 +375,4 @@ struct op {
};
asmlinkage void vfp_save_state(void *location, u32 fpexc);
+asmlinkage u32 vfp_load_state(const void *location);
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 4d8478264d82..d5a03f3c10c5 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -4,12 +4,6 @@
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
- *
- * This code is called from the kernel's undefined instruction trap.
- * r1 holds the thread_info pointer
- * r3 holds the return address for successful handling.
- * lr holds the return address for unrecognised instructions.
- * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
*/
#include <linux/init.h>
#include <linux/linkage.h>
@@ -19,20 +13,6 @@
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
- .macro DBGSTR, str
-#ifdef DEBUG
- stmfd sp!, {r0-r3, ip, lr}
- ldr r0, =1f
- bl _printk
- ldmfd sp!, {r0-r3, ip, lr}
-
- .pushsection .rodata, "a"
-1: .ascii KERN_DEBUG "VFP: \str\n"
- .byte 0
- .previous
-#endif
- .endm
-
.macro DBGSTR1, str, arg
#ifdef DEBUG
stmfd sp!, {r0-r3, ip, lr}
@@ -48,179 +28,25 @@
#endif
.endm
- .macro DBGSTR3, str, arg1, arg2, arg3
-#ifdef DEBUG
- stmfd sp!, {r0-r3, ip, lr}
- mov r3, \arg3
- mov r2, \arg2
- mov r1, \arg1
- ldr r0, =1f
- bl _printk
- ldmfd sp!, {r0-r3, ip, lr}
-
- .pushsection .rodata, "a"
-1: .ascii KERN_DEBUG "VFP: \str\n"
- .byte 0
- .previous
-#endif
- .endm
-
-
-@ VFP hardware support entry point.
-@
-@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
-@ r1 = thread_info pointer
-@ r2 = PC value to resume execution after successful emulation
-@ r3 = normal "successful" return address
-@ lr = unrecognised instruction return address
-@ IRQs enabled.
-ENTRY(vfp_support_entry)
- ldr r11, [r1, #TI_CPU] @ CPU number
- add r10, r1, #TI_VFPSTATE @ r10 = workspace
-
- DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
-
- .fpu vfpv2
- VFPFMRX r1, FPEXC @ Is the VFP enabled?
- DBGSTR1 "fpexc %08x", r1
- tst r1, #FPEXC_EN
- bne look_for_VFP_exceptions @ VFP is already enabled
-
- DBGSTR1 "enable %x", r10
- ldr r9, vfp_current_hw_state_address
- orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set
- ldr r4, [r9, r11, lsl #2] @ vfp_current_hw_state pointer
- bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled
- cmp r4, r10 @ this thread owns the hw context?
-#ifndef CONFIG_SMP
- @ For UP, checking that this thread owns the hw context is
- @ sufficient to determine that the hardware state is valid.
- beq vfp_hw_state_valid
-
- @ On UP, we lazily save the VFP context. As a different
- @ thread wants ownership of the VFP hardware, save the old
- @ state if there was a previous (valid) owner.
-
- VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
- @ exceptions, so we can get at the
- @ rest of it
-
- DBGSTR1 "save old state %p", r4
- cmp r4, #0 @ if the vfp_current_hw_state is NULL
- beq vfp_reload_hw @ then the hw state needs reloading
- VFPFSTMIA r4, r5 @ save the working registers
- VFPFMRX r5, FPSCR @ current status
-#ifndef CONFIG_CPU_FEROCEON
- tst r1, #FPEXC_EX @ is there additional state to save?
- beq 1f
- VFPFMRX r6, FPINST @ FPINST (only if FPEXC.EX is set)
- tst r1, #FPEXC_FP2V @ is there an FPINST2 to read?
- beq 1f
- VFPFMRX r8, FPINST2 @ FPINST2 if needed (and present)
-1:
-#endif
- stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2
-vfp_reload_hw:
-
-#else
- @ For SMP, if this thread does not own the hw context, then we
- @ need to reload it. No need to save the old state as on SMP,
- @ we always save the state when we switch away from a thread.
- bne vfp_reload_hw
-
- @ This thread has ownership of the current hardware context.
- @ However, it may have been migrated to another CPU, in which
- @ case the saved state is newer than the hardware context.
- @ Check this by looking at the CPU number which the state was
- @ last loaded onto.
- ldr ip, [r10, #VFP_CPU]
- teq ip, r11
- beq vfp_hw_state_valid
-
-vfp_reload_hw:
- @ We're loading this threads state into the VFP hardware. Update
- @ the CPU number which contains the most up to date VFP context.
- str r11, [r10, #VFP_CPU]
-
- VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
- @ exceptions, so we can get at the
- @ rest of it
-#endif
-
- DBGSTR1 "load state %p", r10
- str r10, [r9, r11, lsl #2] @ update the vfp_current_hw_state pointer
+ENTRY(vfp_load_state)
+ @ Load the current VFP state
+ @ r0 - load location
+ @ returns FPEXC
+ DBGSTR1 "load VFP state %p", r0
@ Load the saved state back into the VFP
- VFPFLDMIA r10, r5 @ reload the working registers while
+ VFPFLDMIA r0, r1 @ reload the working registers while
@ FPEXC is in a safe state
- ldmia r10, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2
-#ifndef CONFIG_CPU_FEROCEON
- tst r1, #FPEXC_EX @ is there additional state to restore?
+ ldmia r0, {r0-r3} @ load FPEXC, FPSCR, FPINST, FPINST2
+ tst r0, #FPEXC_EX @ is there additional state to restore?
beq 1f
- VFPFMXR FPINST, r6 @ restore FPINST (only if FPEXC.EX is set)
- tst r1, #FPEXC_FP2V @ is there an FPINST2 to write?
+ VFPFMXR FPINST, r2 @ restore FPINST (only if FPEXC.EX is set)
+ tst r0, #FPEXC_FP2V @ is there an FPINST2 to write?
beq 1f
- VFPFMXR FPINST2, r8 @ FPINST2 if needed (and present)
+ VFPFMXR FPINST2, r3 @ FPINST2 if needed (and present)
1:
-#endif
- VFPFMXR FPSCR, r5 @ restore status
-
-@ The context stored in the VFP hardware is up to date with this thread
-vfp_hw_state_valid:
- tst r1, #FPEXC_EX
- bne process_exception @ might as well handle the pending
- @ exception before retrying branch
- @ out before setting an FPEXC that
- @ stops us reading stuff
- VFPFMXR FPEXC, r1 @ Restore FPEXC last
- sub r2, r2, #4 @ Retry current instruction - if Thumb
- str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
- @ else it's one 32-bit instruction, so
- @ always subtract 4 from the following
- @ instruction address.
-
- mov lr, r3 @ we think we have handled things
-local_bh_enable_and_ret:
- adr r0, .
- mov r1, #SOFTIRQ_DISABLE_OFFSET
- b __local_bh_enable_ip @ tail call
-
-look_for_VFP_exceptions:
- @ Check for synchronous or asynchronous exception
- tst r1, #FPEXC_EX | FPEXC_DEX
- bne process_exception
- @ On some implementations of the VFP subarch 1, setting FPSCR.IXE
- @ causes all the CDP instructions to be bounced synchronously without
- @ setting the FPEXC.EX bit
- VFPFMRX r5, FPSCR
- tst r5, #FPSCR_IXE
- bne process_exception
-
- tst r5, #FPSCR_LENGTH_MASK
- beq skip
- orr r1, r1, #FPEXC_DEX
- b process_exception
-skip:
-
- @ Fall into hand on to next handler - appropriate coproc instr
- @ not recognised by VFP
-
- DBGSTR "not VFP"
- b local_bh_enable_and_ret
-
-process_exception:
- DBGSTR "bounce"
- mov r2, sp @ nothing stacked - regdump is at TOS
- mov lr, r3 @ setup for a return to the user code.
-
- @ Now call the C code to package up the bounce to the support code
- @ r0 holds the trigger instruction
- @ r1 holds the FPEXC value
- @ r2 pointer to register dump
- b VFP_bounce @ we have handled this - the support
- @ code will raise an exception if
- @ required. If not, the user code will
- @ retry the faulted instruction
-ENDPROC(vfp_support_entry)
+ VFPFMXR FPSCR, r1 @ restore status
+ ret lr
+ENDPROC(vfp_load_state)
ENTRY(vfp_save_state)
@ Save the current VFP state
@@ -240,10 +66,6 @@ ENTRY(vfp_save_state)
ret lr
ENDPROC(vfp_save_state)
- .align
-vfp_current_hw_state_address:
- .word vfp_current_hw_state
-
.macro tbl_branch, base, tmp, shift
#ifdef CONFIG_THUMB2_KERNEL
adr \tmp, 1f
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 349dcb944a93..58a9442add24 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -18,6 +18,7 @@
#include <linux/uaccess.h>
#include <linux/user.h>
#include <linux/export.h>
+#include <linux/perf_event.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
@@ -29,11 +30,6 @@
#include "vfpinstr.h"
#include "vfp.h"
-/*
- * Our undef handlers (in entry.S)
- */
-asmlinkage void vfp_support_entry(u32, void *, u32, u32);
-
static bool have_vfp __ro_after_init;
/*
@@ -41,7 +37,11 @@ static bool have_vfp __ro_after_init;
* Used in startup: set to non-zero if VFP checks fail
* After startup, holds VFP architecture
*/
-static unsigned int __initdata VFP_arch;
+static unsigned int VFP_arch;
+
+#ifdef CONFIG_CPU_FEROCEON
+extern unsigned int VFP_arch_feroceon __alias(VFP_arch);
+#endif
/*
* The pointer to the vfpstate structure of the thread which currently
@@ -313,13 +313,14 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
* emulate it.
*/
}
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
return exceptions & ~VFP_NAN_FLAG;
}
/*
* Package up a bounce condition.
*/
-void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
+static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
u32 fpscr, orig_fpscr, fpsid, exceptions;
@@ -355,14 +356,12 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
}
if (fpexc & FPEXC_EX) {
-#ifndef CONFIG_CPU_FEROCEON
/*
* Asynchronous exception. The instruction is read from FPINST
* and the interrupted instruction has to be restarted.
*/
trigger = fmrx(FPINST);
regs->ARM_pc -= 4;
-#endif
} else if (!(fpexc & FPEXC_DEX)) {
/*
* Illegal combination of bits. It can be caused by an
@@ -370,7 +369,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
* on VFP subarch 1.
*/
vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
- goto exit;
+ return;
}
/*
@@ -401,7 +400,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
* the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
*/
if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
- goto exit;
+ return;
/*
* The barrier() here prevents fpinst2 being read
@@ -414,8 +413,6 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
if (exceptions)
vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
- exit:
- local_bh_enable();
}
static void vfp_enable(void *unused)
@@ -644,27 +641,6 @@ static int vfp_starting_cpu(unsigned int unused)
return 0;
}
-/*
- * Entered with:
- *
- * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
- * r1 = thread_info pointer
- * r2 = PC value to resume execution after successful emulation
- * r3 = normal "successful" return address
- * lr = unrecognised instruction return address
- */
-asmlinkage void vfp_entry(u32 trigger, struct thread_info *ti, u32 resume_pc,
- u32 resume_return_address)
-{
- if (unlikely(!have_vfp))
- return;
-
- local_bh_disable();
- vfp_support_entry(trigger, ti, resume_pc, resume_return_address);
-}
-
-#ifdef CONFIG_KERNEL_MODE_NEON
-
static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
{
/*
@@ -687,47 +663,151 @@ static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr)
return 1;
}
-static struct undef_hook vfp_kmode_exception_hook[] = {{
+/*
+ * vfp_support_entry - Handle VFP exception
+ *
+ * @regs: pt_regs structure holding the register state at exception entry
+ * @trigger: The opcode of the instruction that triggered the exception
+ *
+ * Returns 0 if the exception was handled, or an error code otherwise.
+ */
+static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
+{
+ struct thread_info *ti = current_thread_info();
+ u32 fpexc;
+
+ if (unlikely(!have_vfp))
+ return -ENODEV;
+
+ if (!user_mode(regs))
+ return vfp_kmode_exception(regs, trigger);
+
+ local_bh_disable();
+ fpexc = fmrx(FPEXC);
+
+ /*
+ * If the VFP unit was not enabled yet, we have to check whether the
+ * VFP state in the CPU's registers is the most recent VFP state
+ * associated with the process. On UP systems, we don't save the VFP
+ * state eagerly on a context switch, so we may need to save the
+ * VFP state to memory first, as it may belong to another process.
+ */
+ if (!(fpexc & FPEXC_EN)) {
+ /*
+ * Enable the VFP unit but mask the FP exception flag for the
+ * time being, so we can access all the registers.
+ */
+ fpexc |= FPEXC_EN;
+ fmxr(FPEXC, fpexc & ~FPEXC_EX);
+
+ /*
+ * Check whether or not the VFP state in the CPU's registers is
+ * the most recent VFP state associated with this task. On SMP,
+ * migration may result in multiple CPUs holding VFP states
+ * that belong to the same task, but only the most recent one
+ * is valid.
+ */
+ if (!vfp_state_in_hw(ti->cpu, ti)) {
+ if (!IS_ENABLED(CONFIG_SMP) &&
+ vfp_current_hw_state[ti->cpu] != NULL) {
+ /*
+ * This CPU is currently holding the most
+ * recent VFP state associated with another
+ * task, and we must save that to memory first.
+ */
+ vfp_save_state(vfp_current_hw_state[ti->cpu],
+ fpexc);
+ }
+
+ /*
+ * We can now proceed with loading the task's VFP state
+ * from memory into the CPU registers.
+ */
+ fpexc = vfp_load_state(&ti->vfpstate);
+ vfp_current_hw_state[ti->cpu] = &ti->vfpstate;
+#ifdef CONFIG_SMP
+ /*
+ * Record that this CPU is now the one holding the most
+ * recent VFP state of the task.
+ */
+ ti->vfpstate.hard.cpu = ti->cpu;
+#endif
+ }
+
+ if (fpexc & FPEXC_EX)
+ /*
+ * Might as well handle the pending exception before
+ * retrying branch out before setting an FPEXC that
+ * stops us reading stuff.
+ */
+ goto bounce;
+
+ /*
+ * No FP exception is pending: just enable the VFP and
+ * replay the instruction that trapped.
+ */
+ fmxr(FPEXC, fpexc);
+ } else {
+ /* Check for synchronous or asynchronous exceptions */
+ if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
+ u32 fpscr = fmrx(FPSCR);
+
+ /*
+ * On some implementations of the VFP subarch 1,
+ * setting FPSCR.IXE causes all the CDP instructions to
+ * be bounced synchronously without setting the
+ * FPEXC.EX bit
+ */
+ if (!(fpscr & FPSCR_IXE)) {
+ if (!(fpscr & FPSCR_LENGTH_MASK)) {
+ pr_debug("not VFP\n");
+ local_bh_enable();
+ return -ENOEXEC;
+ }
+ fpexc |= FPEXC_DEX;
+ }
+ }
+bounce: regs->ARM_pc += 4;
+ VFP_bounce(trigger, fpexc, regs);
+ }
+
+ local_bh_enable();
+ return 0;
+}
+
+static struct undef_hook neon_support_hook[] = {{
.instr_mask = 0xfe000000,
.instr_val = 0xf2000000,
- .cpsr_mask = MODE_MASK | PSR_T_BIT,
- .cpsr_val = SVC_MODE,
- .fn = vfp_kmode_exception,
+ .cpsr_mask = PSR_T_BIT,
+ .cpsr_val = 0,
+ .fn = vfp_support_entry,
}, {
.instr_mask = 0xff100000,
.instr_val = 0xf4000000,
- .cpsr_mask = MODE_MASK | PSR_T_BIT,
- .cpsr_val = SVC_MODE,
- .fn = vfp_kmode_exception,
+ .cpsr_mask = PSR_T_BIT,
+ .cpsr_val = 0,
+ .fn = vfp_support_entry,
}, {
.instr_mask = 0xef000000,
.instr_val = 0xef000000,
- .cpsr_mask = MODE_MASK | PSR_T_BIT,
- .cpsr_val = SVC_MODE | PSR_T_BIT,
- .fn = vfp_kmode_exception,
+ .cpsr_mask = PSR_T_BIT,
+ .cpsr_val = PSR_T_BIT,
+ .fn = vfp_support_entry,
}, {
.instr_mask = 0xff100000,
.instr_val = 0xf9000000,
- .cpsr_mask = MODE_MASK | PSR_T_BIT,
- .cpsr_val = SVC_MODE | PSR_T_BIT,
- .fn = vfp_kmode_exception,
-}, {
- .instr_mask = 0x0c000e00,
- .instr_val = 0x0c000a00,
- .cpsr_mask = MODE_MASK,
- .cpsr_val = SVC_MODE,
- .fn = vfp_kmode_exception,
+ .cpsr_mask = PSR_T_BIT,
+ .cpsr_val = PSR_T_BIT,
+ .fn = vfp_support_entry,
}};
-static int __init vfp_kmode_exception_hook_init(void)
-{
- int i;
+static struct undef_hook vfp_support_hook = {
+ .instr_mask = 0x0c000e00,
+ .instr_val = 0x0c000a00,
+ .fn = vfp_support_entry,
+};
- for (i = 0; i < ARRAY_SIZE(vfp_kmode_exception_hook); i++)
- register_undef_hook(&vfp_kmode_exception_hook[i]);
- return 0;
-}
-subsys_initcall(vfp_kmode_exception_hook_init);
+#ifdef CONFIG_KERNEL_MODE_NEON
/*
* Kernel-side NEON support functions
@@ -832,8 +912,11 @@ static int __init vfp_init(void)
* for NEON if the hardware has the MVFR registers.
*/
if (IS_ENABLED(CONFIG_NEON) &&
- (fmrx(MVFR1) & 0x000fff00) == 0x00011100)
+ (fmrx(MVFR1) & 0x000fff00) == 0x00011100) {
elf_hwcap |= HWCAP_NEON;
+ for (int i = 0; i < ARRAY_SIZE(neon_support_hook); i++)
+ register_undef_hook(&neon_support_hook[i]);
+ }
if (IS_ENABLED(CONFIG_VFPv3)) {
u32 mvfr0 = fmrx(MVFR0);
@@ -902,6 +985,7 @@ static int __init vfp_init(void)
have_vfp = true;
+ register_undef_hook(&vfp_support_hook);
thread_register_notifier(&vfp_notifier_block);
vfp_pm_init();