author     Ard Biesheuvel <ardb@kernel.org>          2021-11-26 19:37:27 +0100
committer  Ard Biesheuvel <ardb@kernel.org>          2021-12-06 12:49:16 +0100
commit     4e918ab13eaf40f19938659cb5a22c93172778a8 (patch)
tree       0df6541cd53851dd56dc5a8013c29378b890d261
parent     1fa8c4b19543ae8c8894ec92a18696c9f9b03fc8 (diff)
ARM: assembler: add optimized ldr/str macros to load variables from memory
We will be adding variable loads to various hot paths, so it makes sense
to add a helper macro that can load variables from asm code without the
use of literal pool entries. On v7 or later, we can simply use MOVW/MOVT
pairs, but on earlier cores, this requires a bit of hackery to emit an
instruction sequence that implements this using ADD/LDR instructions.
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Nicolas Pitre <nico@fluxnic.net>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Tested-by: Vladimir Murzin <vladimir.murzin@arm.com> # ARMv7M
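
To illustrate the two strategies (a sketch only: r2 and high_memory are
example operands here; the real macros parameterize the register, symbol
and condition code), the v7+ expansion materializes the address with a
MOVW/MOVT pair, while the pre-v7 expansion emits a PC-relative group of
ALU/LDR instructions that the linker fixes up via group relocations:

	@ ARMv7+: build the address, then load through it
	movw	r2, #:lower16:high_memory
	movt	r2, #:upper16:high_memory
	ldr	r2, [r2]

	@ Pre-v7: no literal pool entry; the linker encodes the
	@ PC-relative offset into the ALU/LDR instructions themselves
	.globl	high_memory
	.reloc	.L0, R_ARM_ALU_PC_G0_NC, high_memory
	.reloc	.L1, R_ARM_ALU_PC_G1_NC, high_memory
	.reloc	.L2, R_ARM_LDR_PC_G2, high_memory
.L0:	sub	r2, pc, #8		@ PC reads as .L0 + 8 in ARM mode
.L1:	sub	r2, r2, #4
.L2:	ldr	r2, [r2, #0]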
-rw-r--r--  arch/arm/include/asm/assembler.h | 45
-rw-r--r--  arch/arm/kernel/entry-armv.S     |  2
-rw-r--r--  arch/arm/kernel/entry-header.S   |  2
3 files changed, 43 insertions(+), 6 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 1b9d4df331aa..2095638b7140 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -568,12 +568,12 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 /*
  * mov_l - move a constant value or [relocated] address into a register
  */
-	.macro		mov_l, dst:req, imm:req
+	.macro		mov_l, dst:req, imm:req, cond
 	.if		__LINUX_ARM_ARCH__ < 7
-	ldr		\dst, =\imm
+	ldr\cond	\dst, =\imm
 	.else
-	movw		\dst, #:lower16:\imm
-	movt		\dst, #:upper16:\imm
+	movw\cond	\dst, #:lower16:\imm
+	movt\cond	\dst, #:upper16:\imm
 	.endif
 	.endm
 
@@ -611,6 +611,43 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	__adldst_l	str, \src, \sym, \tmp, \cond
 	.endm
 
+	.macro		__ldst_va, op, reg, tmp, sym, cond
+#if __LINUX_ARM_ARCH__ >= 7 || \
+    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS)) || \
+    (defined(CONFIG_LD_IS_LLD) && CONFIG_LLD_VERSION < 140000)
+	mov_l		\tmp, \sym, \cond
+	\op\cond	\reg, [\tmp]
+#else
+	/*
+	 * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
+	 * with the appropriate relocations. The combined sequence has a range
+	 * of -/+ 256 MiB, which should be sufficient for the core kernel and
+	 * for modules loaded into the module region.
+	 */
+	.globl		\sym
+	.reloc		.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+	.reloc		.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+	.reloc		.L2_\@, R_ARM_LDR_PC_G2, \sym
+.L0_\@: sub\cond	\tmp, pc, #8
+.L1_\@: sub\cond	\tmp, \tmp, #4
+.L2_\@: \op\cond	\reg, [\tmp, #0]
+#endif
+	.endm
+
+	/*
+	 * ldr_va - load a 32-bit word from the virtual address of \sym
+	 */
+	.macro		ldr_va, rd:req, sym:req, cond
+	__ldst_va	ldr, \rd, \rd, \sym, \cond
+	.endm
+
+	/*
+	 * str_va - store a 32-bit word to the virtual address of \sym
+	 */
+	.macro		str_va, rn:req, sym:req, tmp:req, cond
+	__ldst_va	str, \rn, \tmp, \sym, \cond
+	.endm
+
 /*
  * rev_l - byte-swap a 32-bit value
  *
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 1a6cf711a3b4..7f7ac963445c 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -53,7 +53,7 @@ UNWIND( .setfp	fpreg, sp )
 	subs	r2, sp, r0		@ SP above bottom of IRQ stack?
 	rsbscs	r2, r2, #THREAD_SIZE	@ ... and below the top?
 #ifdef CONFIG_VMAP_STACK
-	ldr_l	r2, high_memory, cc	@ End of the linear region
+	ldr_va	r2, high_memory, cc	@ End of the linear region
 	cmpcc	r2, r0			@ Stack pointer was below it?
 #endif
 	movcs	sp, r0			@ If so, revert to incoming SP
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 81df2a3561ca..268f7f4c5c05 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -445,7 +445,7 @@ THUMB(	it	ne	)
 	@ in such cases so just carry on.
 	@
 	str	ip, [r0, #12]		@ Stash IP on the mode stack
-	ldr_l	ip, high_memory		@ Start of VMALLOC space
+	ldr_va	ip, high_memory		@ Start of VMALLOC space
 ARM(	cmp	sp, ip		)	@ SP in vmalloc space?
 THUMB(	cmp	r1, ip		)
 THUMB(	itt	lo	)
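
As a usage sketch (hypothetical: some_var and the register choices are
illustrative, not part of this commit), ldr_va can reuse its destination
register as the address scratch, whereas str_va takes an explicit tmp
register because the value being stored must be preserved:

	ldr_va	r0, some_var		@ r0 := some_var (r0 doubles as scratch)
	add	r0, r0, #1
	str_va	r0, some_var, r1	@ some_var := r0, r1 used as scratch
	ldr_va	r2, some_var, cc	@ optional condition, as in entry-armv.S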