From 6f496a555d93db7a11d4860b9220d904822f586a Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Tue, 25 Jun 2019 19:08:54 +0200
Subject: arm64: kaslr: keep modules inside module region when KASAN is enabled

When KASLR and KASAN are both enabled, we keep the modules where they
are, and randomize the placement of the kernel so it is within 2 GB of
the module region. The reason for this is that putting modules in the
vmalloc region (like we normally do when KASLR is enabled) is not
possible in this case, given that the entire vmalloc region is already
backed by KASAN zero shadow pages, and so allocating dedicated KASAN
shadow space as required by loaded modules is not possible.

The default module allocation window is set to [_etext - 128MB, _etext]
in kaslr.c, which is appropriate for KASLR kernels booted without a
seed or with 'nokaslr' on the command line. However, as it turns out,
it is not quite correct for the KASAN case, since it still intersects
the vmalloc region at the top, where attempts to allocate shadow pages
will collide with the KASAN zero shadow pages, causing a WARN() and all
kinds of other trouble. So cap the top end to MODULES_END explicitly
when running with KASAN.

Cc:  # 4.9+
Acked-by: Catalin Marinas
Tested-by: Catalin Marinas
Signed-off-by: Ard Biesheuvel
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/module.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index dd080837e6a9..ed3706d6b3a0 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,6 +32,7 @@
 
 void *module_alloc(unsigned long size)
 {
+	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
 	gfp_t gfp_mask = GFP_KERNEL;
 	void *p;
 
@@ -39,9 +40,12 @@ void *module_alloc(unsigned long size)
 	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
 		gfp_mask |= __GFP_NOWARN;
 
+	if (IS_ENABLED(CONFIG_KASAN))
+		/* don't exceed the static module region - see below */
+		module_alloc_end = MODULES_END;
+
 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				module_alloc_base + MODULES_VSIZE,
-				gfp_mask, PAGE_KERNEL_EXEC, 0,
+				module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
 				NUMA_NO_NODE, __builtin_return_address(0));
 
 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
--
cgit
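
Note: the standalone C program below is an illustrative sketch of the
allocation-window logic fixed by the patch above; the FAKE_* constants
and the 64 MB example offset are made-up placeholders, not the real
arm64 memory layout. It shows that the default upper bound,
module_alloc_base + MODULES_VSIZE, can reach past MODULES_END into
vmalloc space, while the KASAN case pins the upper bound to MODULES_END
so the window stays inside the static module region.

#include <stdint.h>
#include <stdio.h>

/*
 * Placeholder values standing in for the kernel's MODULES_VADDR,
 * MODULES_VSIZE and MODULES_END; they are not the real arm64 layout.
 */
#define FAKE_MODULES_VADDR	0x100000000ULL
#define FAKE_MODULES_VSIZE	(128ULL << 20)	/* 128 MB module region */
#define FAKE_MODULES_END	(FAKE_MODULES_VADDR + FAKE_MODULES_VSIZE)

/*
 * Sketch of the window selection in module_alloc() after the patch:
 * the default upper bound is base + MODULES_VSIZE, but with KASAN the
 * upper bound is pinned to MODULES_END so allocations cannot spill
 * into vmalloc space that is backed only by the KASAN zero shadow.
 */
static uint64_t pick_module_alloc_end(uint64_t module_alloc_base, int kasan)
{
	uint64_t end = module_alloc_base + FAKE_MODULES_VSIZE;

	if (kasan)
		end = FAKE_MODULES_END;

	return end;
}

int main(void)
{
	/* Pretend the randomized window starts 64 MB below the region top. */
	uint64_t base = FAKE_MODULES_END - (64ULL << 20);

	printf("default window: [%#llx, %#llx)\n",
	       (unsigned long long)base,
	       (unsigned long long)(base + FAKE_MODULES_VSIZE));
	printf("KASAN window:   [%#llx, %#llx)\n",
	       (unsigned long long)base,
	       (unsigned long long)pick_module_alloc_end(base, 1));
	return 0;
}

Built with a plain C compiler, it prints both windows, making it easy
to see that the default window ends 64 MB past FAKE_MODULES_END while
the KASAN window does not.
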
From aa69fb62bea15126e744af2e02acc0d6cf3ed4da Mon Sep 17 00:00:00 2001
From: Nathan Chancellor
Date: Tue, 25 Jun 2019 21:20:17 -0700
Subject: arm64/efi: Mark __efistub_stext_offset as an absolute symbol explicitly

After r363059 and r363928 in LLVM, a build using ld.lld as the linker
with CONFIG_RANDOMIZE_BASE enabled fails like so:

ld.lld: error: relocation R_AARCH64_ABS32 cannot be used against symbol
__efistub_stext_offset; recompile with -fPIC

Fangrui and Peter figured out that ld.lld is incorrectly considering
__efistub_stext_offset as a relative symbol because of the order in
which symbols are evaluated. _text is treated as an absolute symbol
and stext is a relative symbol, making __efistub_stext_offset a
relative symbol. Adding ABSOLUTE will force ld.lld to evaluate this
expression in the right context and does not change ld.bfd's behavior.

ld.lld will need to be fixed but the developers do not see a quick or
simple fix without some research (see the linked issue for further
explanation). Add this simple workaround so that ld.lld can continue
to link kernels.

Link: https://github.com/ClangBuiltLinux/linux/issues/561
Link: https://github.com/llvm/llvm-project/commit/025a815d75d2356f2944136269aa5874721ec236
Link: https://github.com/llvm/llvm-project/commit/249fde85832c33f8b06c6b4ac65d1c4b96d23b83
Acked-by: Ard Biesheuvel
Debugged-by: Fangrui Song
Debugged-by: Peter Smith
Suggested-by: Fangrui Song
Signed-off-by: Nathan Chancellor
[will: add comment]
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/image.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 33f14e484040..b22e8ad071b1 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -78,7 +78,11 @@
 
 #ifdef CONFIG_EFI
 
-__efistub_stext_offset = stext - _text;
+/*
+ * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
+ * https://github.com/ClangBuiltLinux/linux/issues/561
+ */
+__efistub_stext_offset = ABSOLUTE(stext - _text);
 
 /*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
--
cgit
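
Note: the minimal GNU ld script below is a standalone illustration of
the ABSOLUTE() idiom used in the patch above; its section layout and
symbol names are invented for the example and it is not
arch/arm64/kernel/image.h. It assigns the same section-relative
difference twice, once plainly and once wrapped in ABSOLUTE(), which,
per the commit message, forces the expression to be evaluated as an
absolute value without changing ld.bfd's behavior.

/* sketch.lds: illustrative only */
SECTIONS
{
	. = 0x80000;

	.text : {
		_text = .;		/* start of the image */
		*(.head.text)
		stext = .;		/* start of regular kernel text */
		*(.text .text.*)
	}

	/*
	 * stext - _text is a fixed offset, but whether the linker
	 * records the resulting symbol as absolute or section-relative
	 * can depend on the order in which the operands are evaluated
	 * (the ld.lld behaviour described in the commit message above).
	 * ABSOLUTE() makes the absolute result explicit.
	 */
	plain_offset    = stext - _text;
	absolute_offset = ABSOLUTE(stext - _text);
}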