Diffstat (limited to 'arch/arm64/kernel/entry.S')
-rw-r--r--	arch/arm64/kernel/entry.S | 128
1 file changed, 88 insertions(+), 40 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e1c59d4008a8..6d14b8f29b5f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,7 +28,7 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
-#include <asm/memory.h>
+#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
@@ -221,6 +221,8 @@ alternative_else_nop_endif
.macro kernel_exit, el
.if \el != 0
+ disable_daif
+
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
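
(Note: disable_daif is not defined in this file; a minimal sketch of the
assembler macro as assumed from the arch/arm64/include/asm/assembler.h
changes in this series. It masks all four PSTATE exception bits at once:)

	.macro	disable_daif
	msr	daifset, #0xf		// mask Debug, SError, IRQ and FIQ
	.endm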
@@ -373,18 +375,18 @@ ENTRY(vectors)
kernel_ventry el1_sync // Synchronous EL1h
kernel_ventry el1_irq // IRQ EL1h
kernel_ventry el1_fiq_invalid // FIQ EL1h
- kernel_ventry el1_error_invalid // Error EL1h
+ kernel_ventry el1_error // Error EL1h
kernel_ventry el0_sync // Synchronous 64-bit EL0
kernel_ventry el0_irq // IRQ 64-bit EL0
kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0
- kernel_ventry el0_error_invalid // Error 64-bit EL0
+ kernel_ventry el0_error // Error 64-bit EL0
#ifdef CONFIG_COMPAT
kernel_ventry el0_sync_compat // Synchronous 32-bit EL0
kernel_ventry el0_irq_compat // IRQ 32-bit EL0
kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
- kernel_ventry el0_error_invalid_compat // Error 32-bit EL0
+ kernel_ventry el0_error_compat // Error 32-bit EL0
#else
kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0
kernel_ventry el0_irq_invalid // IRQ 32-bit EL0
@@ -453,10 +455,6 @@ ENDPROC(el0_error_invalid)
el0_fiq_invalid_compat:
inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
-
-el0_error_invalid_compat:
- inv_entry 0, BAD_ERROR, 32
-ENDPROC(el0_error_invalid_compat)
#endif
el1_sync_invalid:
@@ -508,24 +506,18 @@ el1_da:
* Data abort handling
*/
mrs x3, far_el1
- enable_dbg
- // re-enable interrupts if they were enabled in the aborted context
- tbnz x23, #7, 1f // PSR_I_BIT
- enable_irq
-1:
+ inherit_daif pstate=x23, tmp=x2
clear_address_tag x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
- // disable interrupts before pulling preserved data off the stack
- disable_irq
kernel_exit 1
el1_sp_pc:
/*
* Stack or PC alignment exception handling
*/
mrs x0, far_el1
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x2, sp
bl do_sp_pc_abort
ASM_BUG()
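
(Note: inherit_daif copies the D, A, I and F bits from the interrupted
context's saved PSTATE, here in x23, so the handler runs with exactly the
masking the faulting code had. A sketch of the assumed macro definition:)

	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm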
@@ -533,7 +525,7 @@ el1_undef:
/*
* Undefined instruction
*/
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x0, sp
bl do_undefinstr
ASM_BUG()
@@ -550,7 +542,7 @@ el1_dbg:
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x0, sp
mov x2, x1
mov x1, #BAD_SYNC
@@ -561,7 +553,7 @@ ENDPROC(el1_sync)
.align 6
el1_irq:
kernel_entry 1
- enable_dbg
+ enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
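
(Note: enable_da_f unmasks Debug, SError and FIQ but deliberately leaves
IRQ masked, which is what an interrupt handler wants. A sketch of the
assumed macro, using the daifclr bit encoding D=8, A=4, I=2, F=1:)

	.macro	enable_da_f
	msr	daifclr, #(8 | 4 | 1)	// clear D, A and F; leave I set
	.endm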
@@ -607,6 +599,8 @@ el0_sync:
b.eq el0_ia
cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
+ cmp x24, #ESR_ELx_EC_SVE // SVE access
+ b.eq el0_sve_acc
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
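
(Note: ESR_ELx_EC_SVE is the new exception class for SVE access traps; the
asm/esr.h addition assumed by the comparison above, for reference:)

#define ESR_ELx_EC_SVE		(0x19)	/* EC 0b011001: SVE access trap */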
@@ -658,6 +652,7 @@ el0_svc_compat:
/*
* AArch32 syscall handling
*/
+ ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
adrp stbl, compat_sys_call_table // load compat syscall table pointer
mov wscno, w7 // syscall number in w7 (r7)
mov wsc_nr, #__NR_compat_syscalls
@@ -667,6 +662,10 @@ el0_svc_compat:
el0_irq_compat:
kernel_entry 0, 32
b el0_irq_naked
+
+el0_error_compat:
+ kernel_entry 0, 32
+ b el0_error_naked
#endif
el0_da:
@@ -674,8 +673,7 @@ el0_da:
* Data abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
clear_address_tag x0, x26
mov x1, x25
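
(Note: enable_daif replaces enable_dbg_and_irq on these EL0 paths and now
also unmasks SError. A sketch of the assumed assembler.h macro:)

	.macro	enable_daif
	msr	daifclr, #0xf		// unmask Debug, SError, IRQ and FIQ
	.endm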
@@ -687,8 +685,7 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x26
mov x1, x25
@@ -699,17 +696,27 @@ el0_fpsimd_acc:
/*
* Floating Point or Advanced SIMD access
*/
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
bl do_fpsimd_acc
b ret_to_user
+el0_sve_acc:
+ /*
+ * Scalable Vector Extension access
+ */
+ enable_daif
+ ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_sve_acc
+ b ret_to_user
el0_fpsimd_exc:
/*
- * Floating Point or Advanced SIMD exception
+ * Floating Point, Advanced SIMD or SVE exception
*/
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
@@ -720,8 +727,7 @@ el0_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x26
mov x1, x25
@@ -732,8 +738,7 @@ el0_undef:
/*
* Undefined instruction
*/
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, sp
bl do_undefinstr
@@ -742,7 +747,7 @@ el0_sys:
/*
* System instructions, for trapped cache maintenance instructions
*/
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
@@ -757,11 +762,11 @@ el0_dbg:
mov x1, x25
mov x2, sp
bl do_debug_exception
- enable_dbg
+ enable_daif
ct_user_exit
b ret_to_user
el0_inv:
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, sp
mov x1, #BAD_SYNC
@@ -774,7 +779,7 @@ ENDPROC(el0_sync)
el0_irq:
kernel_entry 0
el0_irq_naked:
- enable_dbg
+ enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
@@ -788,12 +793,34 @@ el0_irq_naked:
b ret_to_user
ENDPROC(el0_irq)
+el1_error:
+ kernel_entry 1
+ mrs x1, esr_el1
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+ kernel_exit 1
+ENDPROC(el1_error)
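
(Note: the SError handlers use the pre-existing enable_dbg rather than
enable_daif, so SError itself stays masked while do_serror runs. A sketch
of that macro, for contrast:)

	.macro	enable_dbg
	msr	daifclr, #8		// unmask Debug only
	.endm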
+
+el0_error:
+ kernel_entry 0
+el0_error_naked:
+ mrs x1, esr_el1
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+ enable_daif
+ ct_user_exit
+ b ret_to_user
+ENDPROC(el0_error)
+
+
/*
* This is the fast syscall return path. We do as little as possible here,
* and this includes saving x0 back into the kernel stack.
*/
ret_fast_syscall:
- disable_irq // disable interrupts
+ disable_daif
str x0, [sp, #S_X0] // returned x0
ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
@@ -803,7 +830,7 @@ ret_fast_syscall:
enable_step_tsk x1, x2
kernel_exit 0
ret_fast_syscall_trace:
- enable_irq // enable interrupts
+ enable_daif
b __sys_trace_return_skipped // we already saved x0
/*
@@ -821,7 +848,7 @@ work_pending:
* "slow" syscall return path.
*/
ret_to_user:
- disable_irq // disable interrupts
+ disable_daif
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
@@ -835,16 +862,37 @@ ENDPROC(ret_to_user)
*/
.align 6
el0_svc:
+ ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
adrp stbl, sys_call_table // load syscall table pointer
mov wscno, w8 // syscall number in w8
mov wsc_nr, #__NR_syscalls
+
+#ifdef CONFIG_ARM64_SVE
+alternative_if_not ARM64_SVE
+ b el0_svc_naked
+alternative_else_nop_endif
+ tbz x16, #TIF_SVE, el0_svc_naked // Skip unless TIF_SVE set:
+ bic x16, x16, #_TIF_SVE // discard SVE state
+ str x16, [tsk, #TSK_TI_FLAGS]
+
+ /*
+ * task_fpsimd_load() won't be called to update CPACR_EL1 in
+ * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
+ * happens if a context switch or kernel_neon_begin() or context
+ * modification (sigreturn, ptrace) intervenes.
+ * So, ensure that CPACR_EL1 is already correct for the fast-path case:
+ */
+ mrs x9, cpacr_el1
+ bic x9, x9, #CPACR_EL1_ZEN_EL0EN // disable SVE for el0
+ msr cpacr_el1, x9 // synchronised by eret to el0
+#endif
+
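
(Note: CPACR_EL1.ZEN is a two-bit field at [17:16] gating SVE access; the
bit name used above is assumed from the asm/sysreg.h additions in this
series:)

#define CPACR_EL1_ZEN_EL1EN	(1 << 16)	/* enable EL1 access */
#define CPACR_EL1_ZEN_EL0EN	(1 << 17)	/* enable EL0 access, if EL1EN set */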
el0_svc_naked: // compat entry point
stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
- enable_dbg_and_irq
+ enable_daif
ct_user_exit 1
- ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks
- tst x16, #_TIF_SYSCALL_WORK
+ tst x16, #_TIF_SYSCALL_WORK // check for syscall hooks
b.ne __sys_trace
cmp wscno, wsc_nr // check upper syscall limit
b.hs ni_sys