author		Ard Biesheuvel <ardb@kernel.org>	2022-04-20 09:41:31 +0100
committer	Russell King (Oracle) <rmk+kernel@armlinux.org.uk>	2022-05-20 12:32:32 +0100
commit		508074607c7b95b24f0adf633fdf606761bb7824 (patch)
tree		6bb3ce001060faa2c5da3c985e219c10e557330c /arch/arm/kernel/entry-header.S
parent		952f03316352c606bebef56ba8f9642edbb8e348 (diff)
ARM: 9195/1: entry: avoid explicit literal loads
ARMv7 has MOVW/MOVT instruction pairs to load symbol addresses into
registers without having to rely on literal loads that go via the
D-cache. For older cores, we now support a similar arrangement, based
on PC-relative group relocations.

This means we can elide most literal loads entirely from the entry
path, by switching to the ldr_va macro to emit the appropriate sequence
depending on the target architecture revision.

While at it, switch to the bl_r macro for invoking the right PABT/DABT
helpers instead of setting the LR register explicitly, which does not
play well with cores that speculate across function returns.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
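For context, the before/after can be sketched roughly as follows. This is
an illustration of the idea, not the verbatim expansion of ldr_va (which
is defined in arch/arm/include/asm/assembler.h); register and symbol
names here are placeholders:

	@ Before: two dependent loads; the first goes through the literal
	@ pool and therefore the D-cache.
	ldr	r1, .L_sym_addr		@ load the address of the symbol
	ldr	r1, [r1]		@ load the value itself

	@ After, on ARMv7 (sketch): materialise the address in the
	@ instruction stream with a MOVW/MOVT pair.
	movw	r1, #:lower16:sym
	movt	r1, #:upper16:sym
	ldr	r1, [r1]

	@ After, on older cores (sketch): compute the address PC-relatively
	@ using ALU/LDR group relocations instead of a literal pool entry.
	sub	r1, pc, #0		@ fixed up via R_ARM_ALU_PC_G0_NC
	sub	r1, r1, #0		@ fixed up via R_ARM_ALU_PC_G1_NC
	ldr	r1, [r1, #0]		@ fixed up via R_ARM_LDR_PC_G2

Either way, the address is derived from the instruction stream itself, so
no data access to a literal pool entry is needed.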
Diffstat (limited to 'arch/arm/kernel/entry-header.S')
-rw-r--r--	arch/arm/kernel/entry-header.S | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 9a1dc142f782..5865621bf691 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -48,8 +48,7 @@
.macro alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
mrc p15, 0, \rtmp2, c1, c0, 0
- ldr \rtmp1, \label
- ldr \rtmp1, [\rtmp1]
+ ldr_va \rtmp1, \label
teq \rtmp1, \rtmp2
mcrne p15, 0, \rtmp1, c1, c0, 0
#endif
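The bl_r part of the change lands in other files of this series; its
effect can be sketched as follows (again an approximation, not the
verbatim macro from arch/arm/include/asm/assembler.h):

	@ Before: the return address is set up by hand, which defeats the
	@ return-address prediction of cores that speculate across returns.
	mov	lr, pc
	mov	pc, r1			@ "call" the handler whose address is in r1

	@ After (sketch): bl_r uses a real branch-and-link where the
	@ architecture provides one.
	bl_r	r1			@ roughly: blx r1 on ARMv5 and later,
					@ mov lr, pc + mov pc, r1 as a fallback
					@ on older cores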