Diffstat (limited to 'arch/arm64/kernel/entry.S')
-rw-r--r--  arch/arm64/kernel/entry.S  |  501
1 file changed, 295 insertions(+), 206 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 863d44f73028..f8018b5c1f9a 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -25,20 +25,11 @@
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
+#include <asm/stacktrace/frame.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
-/*
- * Context tracking and irqflag tracing need to instrument transitions between
- * user and kernel mode.
- */
- .macro user_enter_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
- bl exit_to_user_mode
-#endif
- .endm
-
.macro clear_gp_regs
.irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
mov x\n, xzr
@@ -47,21 +38,23 @@
.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
.align 7
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+.Lventry_start\@:
.if \el == 0
-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+ /*
+ * This must be the first instruction of the EL0 vector entries. It is
+ * skipped by the trampoline vectors, to trigger the cleanup.
+ */
+ b .Lskip_tramp_vectors_cleanup\@
.if \regsize == 64
mrs x30, tpidrro_el0
msr tpidrro_el0, xzr
.else
mov x30, xzr
.endif
-alternative_else_nop_endif
+.Lskip_tramp_vectors_cleanup\@:
.endif
-#endif
sub sp, sp, #PT_REGS_SIZE
-#ifdef CONFIG_VMAP_STACK
/*
* Test whether the SP has overflowed, without corrupting a GPR.
* Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
@@ -103,13 +96,15 @@ alternative_else_nop_endif
/* We were already on the overflow stack. Restore sp/x0 and carry on. */
sub sp, sp, x0
mrs x0, tpidrro_el0
-#endif
b el\el\ht\()_\regsize\()_\label
+.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
.endm
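Each kernel_ventry expansion must fit in one 128-byte architectural vector slot; the closing ".org .Lventry_start\@ + 128" turns an overflow of that slot into a build error. A standalone C sketch (not part of the patch) of the slot arithmetic it protects:

/*
 * Minimal sketch, assuming only the architectural layout: VBAR_EL1 points at
 * 16 entries of 128 bytes each (four exception types for each of EL1t, EL1h,
 * 64-bit EL0 and 32-bit EL0), hence the 2KB (.align 11) table alignment.
 */
#include <stdint.h>
#include <stdio.h>

#define VECTOR_SLOT_SIZE	128	/* bytes per entry, fixed by the architecture */
#define VECTORS_PER_TABLE	16

static uint64_t ventry_addr(uint64_t vbar, unsigned int slot)
{
	return vbar + (uint64_t)slot * VECTOR_SLOT_SIZE;
}

int main(void)
{
	uint64_t vbar = 0xffff800080010800ULL;	/* illustrative VBAR_EL1 value */

	for (unsigned int i = 0; i < VECTORS_PER_TABLE; i++)
		printf("slot %2u -> %#llx\n", i, (unsigned long long)ventry_addr(vbar, i));
	return 0;
}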
- .macro tramp_alias, dst, sym
- mov_q \dst, TRAMP_VALIAS
- add \dst, \dst, #(\sym - .entry.tramp.text)
+ .macro tramp_alias, dst, sym
+ .set .Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text
+ movz \dst, :abs_g2_s:.Lalias\@
+ movk \dst, :abs_g1_nc:.Lalias\@
+ movk \dst, :abs_g0_nc:.Lalias\@
.endm
/*
@@ -117,7 +112,7 @@ alternative_else_nop_endif
* them if required.
*/
.macro apply_ssbd, state, tmp1, tmp2
-alternative_cb spectre_v4_patch_fw_mitigation_enable
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
b .L__asm_ssbd_skip\@ // Patched to NOP
alternative_cb_end
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
@@ -126,71 +121,73 @@ alternative_cb_end
tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
mov w1, #\state
-alternative_cb spectre_v4_patch_fw_mitigation_conduit
+alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
nop // Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
.endm
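apply_ssbd asks firmware (ARM_SMCCC_ARCH_WORKAROUND_2, with the conduit patched to SMC or HVC at boot) to toggle Speculative Store Bypass protection on kernel entry and exit, skipping the call when no callback is required on this CPU or when the task already forces SSBD via TIF_SSBD. A hedged C sketch of that decision, with a stub standing in for the firmware call:

/*
 * Standalone sketch, not kernel code. smccc_arch_workaround_2() stands in
 * for the patched "SMC/HVC #0" conduit; the TIF_SSBD bit number is
 * illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

#define TIF_SSBD_BIT	25	/* illustrative bit position */

static void smccc_arch_workaround_2(unsigned int state)
{
	/* would issue SMC/HVC with w0 = ARM_SMCCC_ARCH_WORKAROUND_2, w1 = state */
	(void)state;
}

static void apply_ssbd(bool entering_kernel, bool callback_required,
		       uint64_t thread_flags)
{
	if (!callback_required)				/* no mitigation needed on this CPU */
		return;
	if (thread_flags & (1UL << TIF_SSBD_BIT))	/* task forces SSBD, already on */
		return;
	smccc_arch_workaround_2(entering_kernel ? 1 : 0);
}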
/* Check for MTE asynchronous tag check faults */
- .macro check_mte_async_tcf, tmp, ti_flags
+ .macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
.arch_extension lse
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
+ /*
+ * Asynchronous tag check faults are only possible in ASYNC (2) or
+ * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
+ * set, so skip the check if it is unset.
+ */
+ tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
mrs_s \tmp, SYS_TFSRE0_EL1
tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
mov \tmp, #_TIF_MTE_ASYNC_FAULT
add \ti_flags, tsk, #TSK_TI_FLAGS
stset \tmp, [\ti_flags]
- msr_s SYS_TFSRE0_EL1, xzr
1:
#endif
.endm
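The new early-out works because SCTLR_EL1.TCF0 encodes NONE=0, SYNC=1, ASYNC=2 and ASYM=3, so asynchronous reporting is possible only when bit 1 of the field is set. A standalone C sketch (not kernel code) of that test:

/*
 * Minimal sketch of the tbz condition above: extract TCF0 (bits [39:38] of
 * SCTLR_EL1 per the MTE definition) and check its bit 1.
 */
#include <stdbool.h>
#include <stdint.h>

#define SCTLR_EL1_TCF0_SHIFT	38
#define SCTLR_EL1_TCF0_MASK	(3ULL << SCTLR_EL1_TCF0_SHIFT)

static bool async_tcf_possible(uint64_t sctlr_user)
{
	uint64_t tcf0 = (sctlr_user & SCTLR_EL1_TCF0_MASK) >> SCTLR_EL1_TCF0_SHIFT;

	return tcf0 & 2;	/* ASYNC (2) or ASYM (3) */
}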
/* Clear the MTE asynchronous tag check faults */
- .macro clear_mte_async_tcf
+ .macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
+ /* See comment in check_mte_async_tcf above. */
+ tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
dsb ish
msr_s SYS_TFSRE0_EL1, xzr
+1:
alternative_else_nop_endif
#endif
.endm
- .macro mte_set_gcr, tmp, tmp2
+ .macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
- /*
- * Calculate and set the exclude mask preserving
- * the RRND (bit[16]) setting.
- */
- mrs_s \tmp2, SYS_GCR_EL1
- bfi \tmp2, \tmp, #0, #16
- msr_s SYS_GCR_EL1, \tmp2
+ ubfx \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
+ orr \tmp, \tmp, #SYS_GCR_EL1_RRND
+ msr_s SYS_GCR_EL1, \tmp
#endif
.endm
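mte_set_gcr now derives GCR_EL1 straight from the task's mte_ctrl word: ubfx extracts a 16-bit user exclude mask and orr forces random tag generation on. A standalone C sketch (not kernel code) of the value it writes:

/*
 * Sketch only: the shift is illustrative, the real placement comes from
 * MTE_CTRL_GCR_USER_EXCL_SHIFT; RRND is bit 16 of GCR_EL1.
 */
#include <stdint.h>

#define MTE_CTRL_GCR_USER_EXCL_SHIFT	0	/* illustrative */
#define SYS_GCR_EL1_RRND		(1ULL << 16)

static uint64_t compute_gcr_el1(uint64_t mte_ctrl)
{
	uint64_t excl = (mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) & 0xffff;

	return excl | SYS_GCR_EL1_RRND;		/* the ubfx + orr pair above */
}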
.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
-alternative_if_not ARM64_MTE
+alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
b 1f
-alternative_else_nop_endif
- ldr_l \tmp, gcr_kernel_excl
-
- mte_set_gcr \tmp, \tmp2
- isb
+alternative_cb_end
+ mov \tmp, KERNEL_GCR_EL1
+ msr_s SYS_GCR_EL1, \tmp
1:
#endif
.endm
.macro mte_set_user_gcr, tsk, tmp, tmp2
-#ifdef CONFIG_ARM64_MTE
-alternative_if_not ARM64_MTE
+#ifdef CONFIG_KASAN_HW_TAGS
+alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
b 1f
-alternative_else_nop_endif
- ldr \tmp, [\tsk, #THREAD_GCR_EL1_USER]
+alternative_cb_end
+ ldr \tmp, [\tsk, #THREAD_MTE_CTRL]
mte_set_gcr \tmp, \tmp2
1:
@@ -198,6 +195,9 @@ alternative_else_nop_endif
.endm
.macro kernel_entry, el, regsize = 64
+ .if \el == 0
+ alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT
+ .endif
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
.endif
@@ -231,8 +231,8 @@ alternative_else_nop_endif
disable_step_tsk x19, x20
/* Check for asynchronous tag check faults in user space */
- check_mte_async_tcf x22, x23
- apply_ssbd 1, x22, x23
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ check_mte_async_tcf x22, x23, x0
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -245,7 +245,6 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
* was disabled on kernel exit then we would have left the kernel IA
* installed so there is no need to install it again.
*/
- ldr x0, [tsk, THREAD_SCTLR_USER]
tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
b 2f
@@ -254,13 +253,27 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
orr x0, x0, SCTLR_ELx_ENIA
msr sctlr_el1, x0
2:
- isb
alternative_else_nop_endif
#endif
+ apply_ssbd 1, x22, x23
+
mte_set_kernel_gcr x22, x23
- scs_load tsk
+ /*
+ * Any non-self-synchronizing system register updates required for
+ * kernel entry should be placed before this point.
+ */
+alternative_if ARM64_MTE
+ isb
+ b 1f
+alternative_else_nop_endif
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ isb
+alternative_else_nop_endif
+1:
+
+ scs_load_current
.else
add x21, sp, #PT_REGS_SIZE
get_current_task tsk
@@ -270,15 +283,16 @@ alternative_else_nop_endif
stp lr, x21, [sp, #S_LR]
/*
- * For exceptions from EL0, create a final frame record.
- * For exceptions from EL1, create a synthetic frame record so the
- * interrupted code shows up in the backtrace.
+ * Create a metadata frame record. The unwinder will use this to
+ * identify and unwind exception boundaries.
*/
- .if \el == 0
stp xzr, xzr, [sp, #S_STACKFRAME]
+ .if \el == 0
+ mov x0, #FRAME_META_TYPE_FINAL
.else
- stp x29, x22, [sp, #S_STACKFRAME]
+ mov x0, #FRAME_META_TYPE_PT_REGS
.endif
+ str x0, [sp, #S_STACKFRAME_TYPE]
add x29, sp, #S_STACKFRAME
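With <asm/stacktrace/frame.h> included, the entry code stores a typed frame record instead of the old synthetic x29/x22 pair, letting the unwinder recognise exception boundaries explicitly. A hedged C sketch of the layout as read from this hunk (field names and enum values are illustrative, not copied from the header):

#include <stdint.h>

enum frame_meta_type {
	FRAME_META_TYPE_FINAL	= 1,	/* exception taken from EL0: stop unwinding here */
	FRAME_META_TYPE_PT_REGS	= 2,	/* exception taken from EL1: pt_regs at this boundary */
};

struct entry_frame_record {
	uint64_t fp;	/* S_STACKFRAME, zeroed by kernel_entry */
	uint64_t lr;
	uint64_t type;	/* S_STACKFRAME_TYPE, one of the values above */
};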
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
@@ -295,19 +309,17 @@ alternative_else_nop_endif
str w21, [sp, #S_SYSCALLNO]
.endif
- /* Save pmr */
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
+ b .Lskip_pmr_save\@
+alternative_else_nop_endif
+
mrs_s x20, SYS_ICC_PMR_EL1
- str x20, [sp, #S_PMR_SAVE]
+ str w20, [sp, #S_PMR]
mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
msr_s SYS_ICC_PMR_EL1, x20
-alternative_else_nop_endif
- /* Re-enable tag checking (TCO set on exception entry) */
-#ifdef CONFIG_ARM64_MTE
-alternative_if ARM64_MTE
- SET_PSTATE_TCO(0)
-alternative_else_nop_endif
+.Lskip_pmr_save\@:
#endif
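On pseudo-NMI kernels the incoming priority mask is stashed in pt_regs and then raised so that only NMI-priority interrupts can preempt the entry code; kernel_exit restores it below. A standalone C sketch (not kernel code, encodings illustrative) of that save/mask/restore:

#include <stdint.h>

#define GIC_PRIO_IRQON		0xe0		/* illustrative priority encodings */
#define GIC_PRIO_PSR_I_SET	(1 << 4)

static uint32_t icc_pmr_el1;			/* stands in for SYS_ICC_PMR_EL1 */

struct entry_regs {
	uint32_t pmr;				/* the S_PMR slot in pt_regs */
};

static void pmr_save_and_mask(struct entry_regs *regs)
{
	regs->pmr = icc_pmr_el1;		/* mrs_s x20, SYS_ICC_PMR_EL1 */
	icc_pmr_el1 = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;	/* msr_s SYS_ICC_PMR_EL1, x20 */
}

static void pmr_restore(const struct entry_regs *regs)
{
	icc_pmr_el1 = regs->pmr;		/* kernel_exit path */
}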
/*
@@ -325,16 +337,22 @@ alternative_else_nop_endif
disable_daif
.endif
- /* Restore pmr */
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
- ldr x20, [sp, #S_PMR_SAVE]
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
+ b .Lskip_pmr_restore\@
+alternative_else_nop_endif
+
+ ldr w20, [sp, #S_PMR]
msr_s SYS_ICC_PMR_EL1, x20
- mrs_s x21, SYS_ICC_CTLR_EL1
- tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE
- dsb sy // Ensure priority change is seen by redistributor
-.L__skip_pmr_sync\@:
+
+ /* Ensure priority change is seen by redistributor */
+alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC
+ dsb sy
alternative_else_nop_endif
+.Lskip_pmr_restore\@:
+#endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
@@ -362,6 +380,10 @@ alternative_else_nop_endif
3:
scs_save tsk
+ /* Ignore asynchronous tag check faults in the uaccess routines */
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ clear_mte_async_tcf x0
+
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
/*
@@ -371,7 +393,6 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
*
* No kernel C function calls after this.
*/
- ldr x0, [tsk, THREAD_SCTLR_USER]
tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
__ptrauth_keys_install_user tsk, x0, x1, x2
b 2f
@@ -405,26 +426,39 @@ alternative_else_nop_endif
ldp x24, x25, [sp, #16 * 12]
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
- ldr lr, [sp, #S_LR]
- add sp, sp, #PT_REGS_SIZE // restore sp
.if \el == 0
-alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- bne 4f
- msr far_el1, x30
- tramp_alias x30, tramp_exit_native
- br x30
-4:
- tramp_alias x30, tramp_exit_compat
- br x30
+ alternative_insn "b .L_skip_tramp_exit_\@", nop, ARM64_UNMAP_KERNEL_AT_EL0
+
+ msr far_el1, x29
+
+ ldr_this_cpu x30, this_cpu_vector, x29
+ tramp_alias x29, tramp_exit
+ msr vbar_el1, x30 // install vector table
+ ldr lr, [sp, #S_LR] // restore x30
+ add sp, sp, #PT_REGS_SIZE // restore sp
+ br x29
+
+.L_skip_tramp_exit_\@:
#endif
+ .endif
+
+ ldr lr, [sp, #S_LR]
+ add sp, sp, #PT_REGS_SIZE // restore sp
+
+ .if \el == 0
+ /* This must be after the last explicit memory access */
+alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ tlbi vale1, xzr
+ dsb nsh
+alternative_else_nop_endif
.else
/* Ensure any device/NC reads complete */
alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+ .endif
eret
- .endif
sb
.endm
@@ -474,18 +508,6 @@ SYM_CODE_END(__swpan_exit_el0)
/* GPRs used by entry code */
tsk .req x28 // current thread_info
-/*
- * Interrupt handling.
- */
- .macro gic_prio_kentry_setup, tmp:req
-#ifdef CONFIG_ARM64_PSEUDO_NMI
- alternative_if ARM64_HAS_IRQ_PRIO_MASKING
- mov \tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
- msr_s SYS_ICC_PMR_EL1, \tmp
- alternative_else_nop_endif
-#endif
- .endm
-
.text
/*
@@ -497,7 +519,7 @@ tsk .req x28 // current thread_info
SYM_CODE_START(vectors)
kernel_ventry 1, t, 64, sync // Synchronous EL1t
kernel_ventry 1, t, 64, irq // IRQ EL1t
- kernel_ventry 1, t, 64, fiq // FIQ EL1h
+ kernel_ventry 1, t, 64, fiq // FIQ EL1t
kernel_ventry 1, t, 64, error // Error EL1t
kernel_ventry 1, h, 64, sync // Synchronous EL1h
@@ -516,13 +538,13 @@ SYM_CODE_START(vectors)
kernel_ventry 0, t, 32, error // Error 32-bit EL0
SYM_CODE_END(vectors)
-#ifdef CONFIG_VMAP_STACK
+SYM_CODE_START_LOCAL(__bad_stack)
/*
* We detected an overflow in kernel_ventry, which switched to the
* overflow stack. Stash the exception regs, and head to our overflow
* handler.
*/
-__bad_stack:
+
/* Restore the original x0 value */
mrs x0, tpidrro_el0
@@ -542,7 +564,7 @@ __bad_stack:
/* Time to die */
bl handle_bad_stack
ASM_BUG()
-#endif /* CONFIG_VMAP_STACK */
+SYM_CODE_END(__bad_stack)
.macro entry_handler el:req, ht:req, regsize:req, label:req
@@ -585,47 +607,17 @@ SYM_CODE_START_LOCAL(ret_to_kernel)
kernel_exit 1
SYM_CODE_END(ret_to_kernel)
-/*
- * "slow" syscall return path.
- */
SYM_CODE_START_LOCAL(ret_to_user)
- disable_daif
- gic_prio_kentry_setup tmp=x3
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
- ldr x19, [tsk, #TSK_TI_FLAGS]
- and x2, x19, #_TIF_WORK_MASK
- cbnz x2, work_pending
-finish_ret_to_user:
- user_enter_irqoff
- /* Ignore asynchronous tag check faults in the uaccess routines */
- clear_mte_async_tcf
+ ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
enable_step_tsk x19, x2
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
- bl stackleak_erase
+#ifdef CONFIG_KSTACK_ERASE
+ bl stackleak_erase_on_task_stack
#endif
kernel_exit 0
-
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
-work_pending:
- mov x0, sp // 'regs'
- mov x1, x19
- bl do_notify_resume
- ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
- b finish_ret_to_user
SYM_CODE_END(ret_to_user)
.popsection // .entry.text
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-/*
- * Exception vectors trampoline.
- */
- .pushsection ".entry.tramp.text", "ax"
-
// Move from tramp_pg_dir to swapper_pg_dir
.macro tramp_map_kernel, tmp
mrs \tmp, ttbr1_el1
@@ -659,12 +651,57 @@ alternative_else_nop_endif
*/
.endm
- .macro tramp_ventry, regsize = 64
+ .macro tramp_data_read_var dst, var
+#ifdef CONFIG_RELOCATABLE
+ ldr \dst, .L__tramp_data_\var
+ .ifndef .L__tramp_data_\var
+ .pushsection ".entry.tramp.rodata", "a", %progbits
+ .align 3
+.L__tramp_data_\var:
+ .quad \var
+ .popsection
+ .endif
+#else
+ /*
+ * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a
+ * compile time constant (and hence not secret and not worth hiding).
+ *
+ * As statically allocated kernel code and data always live in the top
+ * 47 bits of the address space we can sign-extend bit 47 and avoid an
+ * instruction to load the upper 16 bits (which must be 0xFFFF).
+ */
+ movz \dst, :abs_g2_s:\var
+ movk \dst, :abs_g1_nc:\var
+ movk \dst, :abs_g0_nc:\var
+#endif
+ .endm
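The !RELOCATABLE branch above works because static kernel addresses sit in the top of the VA space, so bits [63:48] are just a sign extension of bit 47; :abs_g2_s: provides that sign-extended group and two non-checking movk relocations fill the rest. A standalone C sketch (not kernel code) of the reconstruction:

#include <stdint.h>

static uint64_t build_kernel_va(uint64_t addr)
{
	uint64_t g2 = (addr >> 32) & 0xffff;	/* movz dst, :abs_g2_s:addr  */
	uint64_t g1 = (addr >> 16) & 0xffff;	/* movk dst, :abs_g1_nc:addr */
	uint64_t g0 = addr & 0xffff;		/* movk dst, :abs_g0_nc:addr */
	uint64_t val = g2 << 32;

	/* the "_s" relocation sign-extends bit 47 into bits [63:48] */
	if (val & (1ULL << 47))
		val |= 0xffffULL << 48;

	return val | (g1 << 16) | g0;
}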
+
+#define BHB_MITIGATION_NONE 0
+#define BHB_MITIGATION_LOOP 1
+#define BHB_MITIGATION_FW 2
+#define BHB_MITIGATION_INSN 3
+
+ .macro tramp_ventry, vector_start, regsize, kpti, bhb
.align 7
1:
.if \regsize == 64
msr tpidrro_el0, x30 // Restored in kernel_ventry
.endif
+
+ .if \bhb == BHB_MITIGATION_LOOP
+ /*
+ * This sequence must appear before the first indirect branch. i.e. the
+ * ret out of tramp_ventry. It appears here because x30 is free.
+ */
+ __mitigate_spectre_bhb_loop x30
+ .endif // \bhb == BHB_MITIGATION_LOOP
+
+ .if \bhb == BHB_MITIGATION_INSN
+ clearbhb
+ isb
+ .endif // \bhb == BHB_MITIGATION_INSN
+
+ .if \kpti == 1
/*
* Defend against branch aliasing attacks by pushing a dummy
* entry onto the return stack and using a RET instruction to
@@ -674,67 +711,106 @@ alternative_else_nop_endif
b .
2:
tramp_map_kernel x30
-#ifdef CONFIG_RANDOMIZE_BASE
- adr x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
- ldr x30, [x30]
-#else
- ldr x30, =vectors
-#endif
+ tramp_data_read_var x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
- prfm plil1strm, [x30, #(1b - tramp_vectors)]
+ prfm plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif
+
msr vbar_el1, x30
- add x30, x30, #(1b - tramp_vectors)
isb
+ .else
+ adr_l x30, vectors
+ .endif // \kpti == 1
+
+ .if \bhb == BHB_MITIGATION_FW
+ /*
+ * The firmware sequence must appear before the first indirect branch.
+ * i.e. the ret out of tramp_ventry. But it also needs the stack to be
+ * mapped to save/restore the registers the SMC clobbers.
+ */
+ __mitigate_spectre_bhb_fw
+ .endif // \bhb == BHB_MITIGATION_FW
+
+ add x30, x30, #(1b - \vector_start + 4)
ret
+.org 1b + 128 // Did we overflow the ventry slot?
.endm
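Each trampoline vector page is stamped out once per BHB mitigation flavour; which copy a CPU installs (via this_cpu_vector) is decided at boot by the Spectre-BHB code. A hedged C sketch of such a selection, mirroring the BHB_MITIGATION_* values above but with an illustrative policy, not the kernel's proton-pack.c logic:

enum bhb_mitigation {
	BHB_MITIGATION_NONE = 0,
	BHB_MITIGATION_LOOP = 1,	/* __mitigate_spectre_bhb_loop */
	BHB_MITIGATION_FW   = 2,	/* firmware call before the first indirect branch */
	BHB_MITIGATION_INSN = 3,	/* clearbhb instruction */
};

static enum bhb_mitigation select_bhb_mitigation(int has_clrbhb, int has_fw_wa,
						 int loop_iterations)
{
	if (has_clrbhb)
		return BHB_MITIGATION_INSN;	/* cheapest: one architected instruction */
	if (loop_iterations)
		return BHB_MITIGATION_LOOP;	/* per-CPU branchy loop */
	if (has_fw_wa)
		return BHB_MITIGATION_FW;	/* fall back to firmware */
	return BHB_MITIGATION_NONE;
}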
- .macro tramp_exit, regsize = 64
- adr x30, tramp_vectors
- msr vbar_el1, x30
- tramp_unmap_kernel x30
- .if \regsize == 64
- mrs x30, far_el1
- .endif
- eret
- sb
+ .macro generate_tramp_vector, kpti, bhb
+.Lvector_start\@:
+ .space 0x400
+
+ .rept 4
+ tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
+ .endr
+ .rept 4
+ tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
+ .endr
.endm
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ * The order must match __bp_harden_el1_vectors and the
+ * arm64_bp_harden_el1_vectors enum.
+ */
+ .pushsection ".entry.tramp.text", "ax"
.align 11
-SYM_CODE_START_NOALIGN(tramp_vectors)
- .space 0x400
+SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
+SYM_CODE_END(tramp_vectors)
- tramp_ventry
- tramp_ventry
- tramp_ventry
- tramp_ventry
+SYM_CODE_START_LOCAL(tramp_exit)
+ tramp_unmap_kernel x29
+ mrs x29, far_el1 // restore x29
+ eret
+ sb
+SYM_CODE_END(tramp_exit)
+ .popsection // .entry.tramp.text
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
- tramp_ventry 32
- tramp_ventry 32
- tramp_ventry 32
- tramp_ventry 32
-SYM_CODE_END(tramp_vectors)
+/*
+ * Exception vectors for spectre mitigations on entry from EL1 when
+ * kpti is not in use.
+ */
+ .macro generate_el1_vector, bhb
+.Lvector_start\@:
+ kernel_ventry 1, t, 64, sync // Synchronous EL1t
+ kernel_ventry 1, t, 64, irq // IRQ EL1t
+ kernel_ventry 1, t, 64, fiq // FIQ EL1t
+ kernel_ventry 1, t, 64, error // Error EL1t
-SYM_CODE_START(tramp_exit_native)
- tramp_exit
-SYM_CODE_END(tramp_exit_native)
+ kernel_ventry 1, h, 64, sync // Synchronous EL1h
+ kernel_ventry 1, h, 64, irq // IRQ EL1h
+ kernel_ventry 1, h, 64, fiq // FIQ EL1h
+ kernel_ventry 1, h, 64, error // Error EL1h
-SYM_CODE_START(tramp_exit_compat)
- tramp_exit 32
-SYM_CODE_END(tramp_exit_compat)
+ .rept 4
+ tramp_ventry .Lvector_start\@, 64, 0, \bhb
+ .endr
+ .rept 4
+ tramp_ventry .Lvector_start\@, 32, 0, \bhb
+ .endr
+ .endm
+
+/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
+ .pushsection ".entry.text", "ax"
+ .align 11
+SYM_CODE_START(__bp_harden_el1_vectors)
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ generate_el1_vector bhb=BHB_MITIGATION_LOOP
+ generate_el1_vector bhb=BHB_MITIGATION_FW
+ generate_el1_vector bhb=BHB_MITIGATION_INSN
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+SYM_CODE_END(__bp_harden_el1_vectors)
+ .popsection
- .ltorg
- .popsection // .entry.tramp.text
-#ifdef CONFIG_RANDOMIZE_BASE
- .pushsection ".rodata", "a"
- .align PAGE_SHIFT
-SYM_DATA_START(__entry_tramp_data_start)
- .quad vectors
-SYM_DATA_END(__entry_tramp_data_start)
- .popsection // .rodata
-#endif /* CONFIG_RANDOMIZE_BASE */
-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
* Register switch for AArch64. The callee-saved registers need to be saved
@@ -745,6 +821,7 @@ SYM_DATA_END(__entry_tramp_data_start)
*
*/
SYM_FUNC_START(cpu_switch_to)
+ save_and_disable_daif x11
mov x10, #THREAD_CPU_CONTEXT
add x8, x0, x10
mov x9, sp
@@ -767,7 +844,8 @@ SYM_FUNC_START(cpu_switch_to)
msr sp_el0, x1
ptrauth_keys_install_kernel x1, x8, x9, x10
scs_save x0
- scs_load x1
+ scs_load_current
+ restore_irq x11
ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
@@ -781,6 +859,8 @@ SYM_CODE_START(ret_from_fork)
mov x0, x20
blr x19
1: get_current_task tsk
+ mov x0, sp
+ bl asm_exit_to_user_mode
b ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
@@ -792,31 +872,33 @@ NOKPROBE(ret_from_fork)
* Calls func(regs) using this CPU's irq stack and shadow irq stack.
*/
SYM_FUNC_START(call_on_irq_stack)
+ save_and_disable_daif x9
#ifdef CONFIG_SHADOW_CALL_STACK
- stp scs_sp, xzr, [sp, #-16]!
+ get_current_task x16
+ scs_save x16
ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif
+
/* Create a frame record to save our LR and SP (implicit in FP) */
stp x29, x30, [sp, #-16]!
mov x29, sp
ldr_this_cpu x16, irq_stack_ptr, x17
- mov x15, #IRQ_STACK_SIZE
- add x16, x16, x15
/* Move to the new stack and call the function there */
- mov sp, x16
+ add sp, x16, #IRQ_STACK_SIZE
+ restore_irq x9
blr x1
+ save_and_disable_daif x9
/*
* Restore the SP from the FP, and restore the FP and LR from the frame
* record.
*/
mov sp, x29
ldp x29, x30, [sp], #16
-#ifdef CONFIG_SHADOW_CALL_STACK
- ldp scs_sp, xzr, [sp], #16
-#endif
+ scs_load_current
+ restore_irq x9
ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)
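call_on_irq_stack now parks the task's shadow call stack with scs_save, switches to the per-CPU IRQ (and IRQ shadow) stack, and masks DAIF around the stack switches. A sketch of the C-side contract as read from this routine (prototype paraphrased, not copied from the arm64 headers):

struct pt_regs;

/* implemented by the assembly routine above */
void call_on_irq_stack(struct pt_regs *regs, void (*func)(struct pt_regs *));

static void example_irq_dispatch(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	/* runs handler(regs) on this CPU's IRQ stack, returns on the original stack */
	call_on_irq_stack(regs, handler);
}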
@@ -845,7 +927,6 @@ NOKPROBE(call_on_irq_stack)
* This clobbers x4, __sdei_handler() will restore this from firmware's
* copy.
*/
-.ltorg
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
mrs x4, ttbr1_el1
@@ -859,14 +940,7 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
* Remember whether to unmap the kernel on exit.
*/
1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
-
-#ifdef CONFIG_RANDOMIZE_BASE
- adr x4, tramp_vectors + PAGE_SIZE
- add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
- ldr x4, [x4]
-#else
- ldr x4, =__sdei_asm_handler
-#endif
+ tramp_data_read_var x4, __sdei_asm_handler
br x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
@@ -887,15 +961,7 @@ SYM_CODE_START(__sdei_asm_exit_trampoline)
1: sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
- .ltorg
.popsection // .entry.tramp.text
-#ifdef CONFIG_RANDOMIZE_BASE
-.pushsection ".rodata", "a"
-SYM_DATA_START(__sdei_asm_trampoline_next_handler)
- .quad __sdei_asm_handler
-SYM_DATA_END(__sdei_asm_trampoline_next_handler)
-.popsection // .rodata
-#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
@@ -931,11 +997,14 @@ SYM_CODE_START(__sdei_asm_handler)
mov x19, x1
-#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
+ /* Store the registered-event for crash_smp_send_stop() */
ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
-#endif
+ cbnz w4, 1f
+ adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
+ b 2f
+1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
+2: str x19, [x5]
-#ifdef CONFIG_VMAP_STACK
/*
* entry.S may have been using sp as a scratch register, find whether
* this is a normal or critical event and switch to the appropriate
@@ -948,7 +1017,6 @@ SYM_CODE_START(__sdei_asm_handler)
2: mov x6, #SDEI_STACK_SIZE
add x5, x5, x6
mov sp, x5
-#endif
#ifdef CONFIG_SHADOW_CALL_STACK
/* Use a separate shadow call stack for normal and critical events */
@@ -990,14 +1058,24 @@ SYM_CODE_START(__sdei_asm_handler)
mov sp, x1
mov x1, x0 // address to complete_and_resume
- /* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
- cmp x0, #1
+ /* x0 = (x0 <= SDEI_EV_FAILED) ?
+ * EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME
+ */
+ cmp x0, #SDEI_EV_FAILED
mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
csel x0, x2, x3, ls
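The cmp/csel pair is a simple ternary on the handler's return value: SDEI_EV_HANDLED (0) and SDEI_EV_FAILED (1) complete the event, anything larger is treated as an address to resume at. A standalone C sketch (not kernel code) of that selection:

#include <stdint.h>

#define SDEI_EV_HANDLED	0
#define SDEI_EV_FAILED	1

static uint64_t sdei_completion_fn(uint64_t handler_ret,
				   uint64_t fn_complete,
				   uint64_t fn_complete_and_resume)
{
	/* "cmp x0, #SDEI_EV_FAILED; csel x0, x2, x3, ls" */
	return (handler_ret <= SDEI_EV_FAILED) ? fn_complete
					       : fn_complete_and_resume;
}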
ldr_l x2, sdei_exit_mode
+ /* Clear the registered-event seen by crash_smp_send_stop() */
+ ldrb w3, [x4, #SDEI_EVENT_PRIORITY]
+ cbnz w3, 1f
+ adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
+ b 2f
+1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
+2: str xzr, [x5]
+
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
sdei_handler_exit exit_mode=x2
alternative_else_nop_endif
@@ -1008,4 +1086,15 @@ alternative_else_nop_endif
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
+
+SYM_CODE_START(__sdei_handler_abort)
+ mov_q x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
+ adr x1, 1f
+ ldr_l x2, sdei_exit_mode
+ sdei_handler_exit exit_mode=x2
+ // exit the handler and jump to the next instruction.
+ // Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
+1: ret
+SYM_CODE_END(__sdei_handler_abort)
+NOKPROBE(__sdei_handler_abort)
#endif /* CONFIG_ARM_SDE_INTERFACE */