author		Mark Rutland <mark.rutland@arm.com>	2024-01-16 11:02:21 +0000
committer	Will Deacon <will@kernel.org>		2024-01-18 11:00:09 +0000
commit		da59f1d051d57e85eca49401a3a36d5a622babde (patch)
tree		d937594d6f27b2ab49e62c98dd6560aa9d1ca517 /arch
parent		832dd634bd1b4e3bbe9f10b9c9ba5db6f6f2b97f (diff)
arm64: entry: simplify kernel_exit logic
For historical reasons, the non-KPTI exception return path is duplicated for
EL1 and EL0, with the structure:

	.if	\el == 0
	[ KPTI handling ]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	[ EL0 exception return workaround ]
	eret
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	[ EL1 exception return workaround ]
	eret
	.endif
	sb

This would be simpler and clearer with the common portions factored out, e.g.

	.if	\el == 0
	[ KPTI handling ]
	.endif

	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	.if	\el == 0
	[ EL0 exception return workaround ]
	.else
	[ EL1 exception return workaround ]
	.endif

	eret
	sb

This expands to the same code, but is simpler for a human to follow as it
avoids duplicating the restore of LR+SP, and makes it clear that the ERET is
associated with the SB.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20240116110221.420467-3-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
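The key property is that the assembler evaluates .if/.else/.endif once per
macro expansion, so the factored form emits exactly the same instruction
stream as the duplicated form. A minimal standalone sketch of this, using a
hypothetical demo_exit macro with placeholder mov instructions (not the
kernel's code):

	// Hypothetical macro; the mov instructions are placeholders only.
	.macro	demo_exit, el
	.if	\el == 0
	mov	x0, #0			// EL0-only step (stands in for KPTI handling)
	.endif
	mov	x1, #1			// common step (stands in for the LR+SP restore)
	.if	\el == 0
	mov	x2, #2			// EL0 exception return workaround
	.else
	mov	x2, #3			// EL1 exception return workaround
	.endif
	mov	x3, #4			// common tail (stands in for ERET + SB)
	.endm

	demo_exit 0	// expands to: mov x0,#0; mov x1,#1; mov x2,#2; mov x3,#4
	demo_exit 1	// expands to:            mov x1,#1; mov x2,#3; mov x3,#4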
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/kernel/entry.S | 9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 7fcbee0f6c0e..7ef0e127b149 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -442,24 +442,23 @@ alternative_else_nop_endif
 .L_skip_tramp_exit_\@:
 #endif
+	.endif
+
 	ldr	lr, [sp, #S_LR]
 	add	sp, sp, #PT_REGS_SIZE		// restore sp
+	.if	\el == 0
 	/* This must be after the last explicit memory access */
 alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
 	tlbi	vale1, xzr
 	dsb	nsh
 alternative_else_nop_endif
-	eret
 	.else
-	ldr	lr, [sp, #S_LR]
-	add	sp, sp, #PT_REGS_SIZE		// restore sp
-
 	/* Ensure any device/NC reads complete */
 	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+	.endif
 	eret
-	.endif
 	sb
 	.endm
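For ease of review, the post-patch shape of this tail of kernel_exit, read
off the hunk above (context plus '+' lines only; blank-line placement is
approximate and the earlier parts of the macro are omitted):

 .L_skip_tramp_exit_\@:
 #endif
	.endif

	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	.if	\el == 0
	/* This must be after the last explicit memory access */
 alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	tlbi	vale1, xzr
	dsb	nsh
 alternative_else_nop_endif
	.else
	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
	.endif
	eret
	sb
	.endm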