From 8339f7d8e178d9c933f437d14be0a5fd1359f53d Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 30 May 2023 12:03:23 +0100
Subject: arm64: module: remove old !KASAN_VMALLOC logic

Historically, KASAN could be selected with or without KASAN_VMALLOC, and
we had to be very careful where to place modules when KASAN_VMALLOC was
not selected.

However, since commit:

  f6f37d9320a11e90 ("arm64: select KASAN_VMALLOC for SW/HW_TAGS modes")

selecting CONFIG_KASAN on arm64 will also select CONFIG_KASAN_VMALLOC,
and so the logic for handling CONFIG_KASAN without CONFIG_KASAN_VMALLOC
is redundant and can be removed.

Note: the "kasan.vmalloc={on,off}" option, which only exists for
HW_TAGS, changes whether the vmalloc region is given non-match-all tags,
and does not affect the page table manipulation code.

The VM_DEFER_KMEMLEAK flag was only necessary for !CONFIG_KASAN_VMALLOC,
as described in its introduction in commit:

  60115fa54ad7b913 ("mm: defer kmemleak object creation of module_alloc()")

... and therefore it can also be removed.

Remove the redundant logic for !CONFIG_KASAN_VMALLOC. At the same time,
add the missing braces around the multi-line conditional block in
arch/arm64/kernel/module.c.

Suggested-by: Ard Biesheuvel
Signed-off-by: Mark Rutland
Reviewed-by: Ard Biesheuvel
Cc: Alexander Potapenko
Cc: Andrew Morton
Cc: Andrey Konovalov
Cc: Andrey Ryabinin
Cc: Dmitry Vyukov
Cc: Will Deacon
Tested-by: Shanker Donthineni
Link: https://lore.kernel.org/r/20230530110328.2213762-2-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/module.c | 23 +++--------------------
 1 file changed, 3 insertions(+), 20 deletions(-)

(limited to 'arch/arm64/kernel/module.c')

diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 5af4975caeb5..b2657ac41226 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,33 +32,16 @@ void *module_alloc(unsigned long size)
 	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
 		gfp_mask |= __GFP_NOWARN;
 
-	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
-	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		/* don't exceed the static module region - see below */
-		module_alloc_end = MODULES_END;
-
 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				 module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
+				 module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
 				 NUMA_NO_NODE, __builtin_return_address(0));
 
-	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-	    (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
-	     (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-	      !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
-		/*
-		 * KASAN without KASAN_VMALLOC can only deal with module
-		 * allocations being served from the reserved module region,
-		 * since the remainder of the vmalloc region is already
-		 * backed by zero shadow pages, and punching holes into it
-		 * is non-trivial. Since the module region is not randomized
-		 * when KASAN is enabled without KASAN_VMALLOC, it is even
-		 * less likely that the module region gets exhausted, so we
-		 * can simply omit this fallback in that case.
-		 */
+	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) {
 		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
 				module_alloc_base + SZ_2G, GFP_KERNEL,
 				PAGE_KERNEL, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
+	}
 
 	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
 		vfree(p);
--
cgit

From e46b7103aef39c3f421f0bff7a10ae5a29cd5cee Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 30 May 2023 12:03:26 +0100
Subject: arm64: module: move module randomization to module.c

When CONFIG_RANDOMIZE_BASE=y, module_alloc_base is a variable which is
configured by kaslr_module_init() in kaslr.c, and otherwise it is an
expression defined in module.h.

As kaslr_module_init() is no longer tightly coupled with the KASLR
initialization code, we can centralize this in module.c.

This patch moves kaslr_module_init() to module.c, making
module_alloc_base a static variable, and removing redundant includes
from kaslr.c. For the definition of struct arm64_ftr_override we must
include <asm/cpufeature.h>, which was previously included transitively
via another header.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Reviewed-by: Ard Biesheuvel
Cc: Will Deacon
Tested-by: Shanker Donthineni
Link: https://lore.kernel.org/r/20230530110328.2213762-5-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/module.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

(limited to 'arch/arm64/kernel/module.c')

diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index b2657ac41226..51fd8abd9b74 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -15,13 +15,61 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
+
 #include 
 #include 
 #include 
 #include 
 
+static u64 __ro_after_init module_alloc_base = (u64)_etext - MODULES_VSIZE;
+
+#ifdef CONFIG_RANDOMIZE_BASE
+static int __init kaslr_module_init(void)
+{
+	u64 module_range;
+	u32 seed;
+
+	if (!kaslr_enabled())
+		return 0;
+
+	seed = get_random_u32();
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+		/*
+		 * Randomize the module region over a 2 GB window covering the
+		 * kernel. This reduces the risk of modules leaking information
+		 * about the address of the kernel itself, but results in
+		 * branches between modules and the core kernel that are
+		 * resolved via PLTs. (Branches between modules will be
+		 * resolved normally.)
+		 */
+		module_range = SZ_2G - (u64)(_end - _stext);
+		module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
+	} else {
+		/*
+		 * Randomize the module region by setting module_alloc_base to
+		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
+		 * _stext). This guarantees that the resulting region still
+		 * covers [_stext, _etext], and that all relative branches can
+		 * be resolved without veneers unless this region is exhausted
+		 * and we fall back to a larger 2GB window in module_alloc()
+		 * when ARM64_MODULE_PLTS is enabled.
+		 */
+		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
+	}
+
+	/* use the lower 21 bits to randomize the base of the module region */
+	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
+	module_alloc_base &= PAGE_MASK;
+
+	return 0;
+}
+subsys_initcall(kaslr_module_init)
+#endif
+
 void *module_alloc(unsigned long size)
 {
 	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
--
cgit

From ea3752ba9685b47db4571ddaee39344cf2b0bf45 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 30 May 2023 12:03:27 +0100
Subject: arm64: module: mandate MODULE_PLTS

Contemporary kernels and modules can be relatively large, especially
when common debug options are enabled. Using GCC 12.1.0, a v6.3-rc7
defconfig kernel is ~38M, and with PROVE_LOCKING + KASAN_INLINE enabled
this expands to ~117M. Shanker reports [1] that the NVIDIA GPU driver
alone can consume 110M of module space in some configurations.

Both KASLR and ARM64_ERRATUM_843419 select MODULE_PLTS, so anyone
wanting a kernel to have KASLR or run on Cortex-A53 will have
MODULE_PLTS selected. This is the case in defconfig and distribution
kernels (e.g. Debian, Android, etc).

Practically speaking, this means we're very likely to need MODULE_PLTS.
While it's almost guaranteed that MODULE_PLTS will be selected, it is
possible to disable support, and we have to maintain some awkward
special cases for such unusual configurations.

This patch removes the MODULE_PLTS config option, with the support code
always enabled if MODULES is selected. This results in a slight
simplification, and will allow for further improvement in subsequent
patches.

For any config which currently selects MODULE_PLTS, there will be no
functional change as a result of this patch.

[1] https://lore.kernel.org/linux-arm-kernel/159ceeab-09af-3174-5058-445bc8dcf85b@nvidia.com/

Signed-off-by: Mark Rutland
Reviewed-by: Ard Biesheuvel
Cc: Shanker Donthineni
Cc: Will Deacon
Tested-by: Shanker Donthineni
Link: https://lore.kernel.org/r/20230530110328.2213762-6-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/module.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

(limited to 'arch/arm64/kernel/module.c')

diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 51fd8abd9b74..f64636c2fd05 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -73,25 +73,26 @@ subsys_initcall(kaslr_module_init)
 void *module_alloc(unsigned long size)
 {
 	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
-	gfp_t gfp_mask = GFP_KERNEL;
 	void *p;
 
-	/* Silence the initial allocation */
-	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
-		gfp_mask |= __GFP_NOWARN;
-
+	/*
+	 * Where possible, prefer to allocate within direct branch range of the
+	 * kernel such that no PLTs are necessary. This may fail, so we pass
+	 * __GFP_NOWARN to silence the resulting warning.
+	 */
 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				 module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
-				 NUMA_NO_NODE, __builtin_return_address(0));
+				 module_alloc_end, GFP_KERNEL | __GFP_NOWARN,
+				 PAGE_KERNEL, 0, NUMA_NO_NODE,
+				 __builtin_return_address(0));
 
-	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) {
+	if (!p) {
 		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
 				module_alloc_base + SZ_2G, GFP_KERNEL,
 				PAGE_KERNEL, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 	}
 
-	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
+	if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
 		vfree(p);
 		return NULL;
 	}
@@ -479,9 +480,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		case R_AARCH64_CALL26:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
 					     AARCH64_INSN_IMM_26);
-
-			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-			    ovf == -ERANGE) {
+			if (ovf == -ERANGE) {
 				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
 				if (!val)
 					return -ENOEXEC;
@@ -518,7 +517,7 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
 				  const Elf_Shdr *sechdrs,
 				  struct module *mod)
 {
-#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
+#if defined(CONFIG_DYNAMIC_FTRACE)
 	const Elf_Shdr *s;
 	struct plt_entry *plts;
--
cgit

From 3e35d303ab7d22c4b6597e56ba46ee7cc61f3a5a Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 30 May 2023 12:03:28 +0100
Subject: arm64: module: rework module VA range selection

Currently, the modules region is 128M in size, which is a problem for
some large modules. Shanker reports [1] that the NVIDIA GPU driver alone
can consume 110M of module space in some configurations. We'd like to
make the modules region a full 2G such that we can always make use of a
2G range.

It's possible to build kernel images which are larger than 128M in some
configurations, such as when many debug options are selected and many
drivers are built in. In these configurations, we can't legitimately
select a base for a 128M module region, though we currently select a
value for which allocation will fail. It would be nicer to have a
diagnostic message in this case.

Similarly, in theory it's possible to build a kernel image which is
larger than 2G and which cannot support modules. While this isn't likely
to be the case for any realistic kernel deployed in the field, it would
be nice if we could print a diagnostic in this case.

This patch reworks the module VA range selection to use a 2G range, and
improves handling of cases where we cannot select legitimate module
regions. We now attempt to select a 128M region and a 2G region:

* The 128M region is selected such that modules can use direct branches
  (with JUMP26/CALL26 relocations) to branch to kernel code and other
  modules, and so that modules can reference data and text (using PREL32
  relocations) anywhere in the kernel image and other modules.

  This region covers the entire kernel image (rather than just the text)
  to ensure that all PREL32 relocations are in range even when the
  kernel data section is absurdly large. Where we cannot allocate from
  this region, we'll fall back to the full 2G region.

* The 2G region is selected such that modules can use direct branches
  with PLTs to branch to kernel code and other modules, and so that
  modules can reference data and text (with PREL32 relocations) in the
  kernel image and other modules.

  This region covers the entire kernel image, and the 128M region (if
  one is selected).
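[ Illustrative aside, not part of the original commit message: a minimal
  user-space C sketch of the relocation ranges the two regions are built
  around. A CALL26/JUMP26 immediate is a signed 26-bit count of 4-byte
  instructions (so +/-128M), and a PREL32 relocation is a signed 32-bit
  byte offset (so +/-2G). The addresses used below are hypothetical. ]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Range limits implied by the relocation encodings. */
#define CALL26_RANGE (128ULL << 20)	/* 2^25 instructions * 4 bytes */
#define PREL32_RANGE (2ULL << 30)	/* 2^31 bytes */

/* Is 'to' reachable from 'from' with a signed offset of +/-'range'? */
static bool in_range(uint64_t from, uint64_t to, uint64_t range)
{
	int64_t off = (int64_t)(to - from);

	return off >= -(int64_t)range && off < (int64_t)range;
}

int main(void)
{
	uint64_t kernel_text = 0xffff800008010000ULL;	/* hypothetical */
	uint64_t module_site = kernel_text - (100ULL << 20); /* 100M below */

	/* At 100M apart, both a direct branch and a PREL32 reference work. */
	printf("CALL26 in range: %d\n",
	       in_range(module_site, kernel_text, CALL26_RANGE));
	printf("PREL32 in range: %d\n",
	       in_range(module_site, kernel_text, PREL32_RANGE));
	return 0;
}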
The two module regions are randomized independently while ensuring the
constraints described above.

[1] https://lore.kernel.org/linux-arm-kernel/159ceeab-09af-3174-5058-445bc8dcf85b@nvidia.com/

Signed-off-by: Mark Rutland
Reviewed-by: Ard Biesheuvel
Cc: Shanker Donthineni
Cc: Will Deacon
Tested-by: Shanker Donthineni
Link: https://lore.kernel.org/r/20230530110328.2213762-7-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/module.c | 139 ++++++++++++++++++++++++++++++---------------
 1 file changed, 94 insertions(+), 45 deletions(-)

(limited to 'arch/arm64/kernel/module.c')

diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f64636c2fd05..dd851297596e 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -7,6 +7,8 @@
  * Author: Will Deacon
  */
 
+#define pr_fmt(fmt) "Modules: " fmt
+
 #include 
 #include 
 #include 
@@ -24,72 +26,119 @@
 #include 
 #include 
 
-static u64 __ro_after_init module_alloc_base = (u64)_etext - MODULES_VSIZE;
+static u64 module_direct_base __ro_after_init = 0;
+static u64 module_plt_base __ro_after_init = 0;
 
-#ifdef CONFIG_RANDOMIZE_BASE
-static int __init kaslr_module_init(void)
+/*
+ * Choose a random page-aligned base address for a window of 'size' bytes which
+ * entirely contains the interval [start, end - 1].
+ */
+static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
 {
-	u64 module_range;
-	u32 seed;
+	u64 max_pgoff, pgoff;
 
-	if (!kaslr_enabled())
+	if ((end - start) >= size)
 		return 0;
 
-	seed = get_random_u32();
+	max_pgoff = (size - (end - start)) / PAGE_SIZE;
+	pgoff = get_random_u32_inclusive(0, max_pgoff);
 
-	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
-		/*
-		 * Randomize the module region over a 2 GB window covering the
-		 * kernel. This reduces the risk of modules leaking information
-		 * about the address of the kernel itself, but results in
-		 * branches between modules and the core kernel that are
-		 * resolved via PLTs. (Branches between modules will be
-		 * resolved normally.)
-		 */
-		module_range = SZ_2G - (u64)(_end - _stext);
-		module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
+	return start - pgoff * PAGE_SIZE;
+}
+
+/*
+ * Modules may directly reference data and text anywhere within the kernel
+ * image and other modules. References using PREL32 relocations have a +/-2G
+ * range, and so we need to ensure that the entire kernel image and all modules
+ * fall within a 2G window such that these are always within range.
+ *
+ * Modules may directly branch to functions and code within the kernel text,
+ * and to functions and code within other modules. These branches will use
+ * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
+ * that the entire kernel text and all module text falls within a 128M window
+ * such that these are always within range. With PLTs, we can expand this to a
+ * 2G window.
+ *
+ * We chose the 128M region to surround the entire kernel image (rather than
+ * just the text) as using the same bounds for the 128M and 2G regions ensures
+ * by construction that we never select a 128M region that is not a subset of
+ * the 2G region. For very large and unusual kernel configurations this means
+ * we may fall back to PLTs where they could have been avoided, but this keeps
+ * the logic significantly simpler.
+ */
+static int __init module_init_limits(void)
+{
+	u64 kernel_end = (u64)_end;
+	u64 kernel_start = (u64)_text;
+	u64 kernel_size = kernel_end - kernel_start;
+
+	/*
+	 * The default modules region is placed immediately below the kernel
+	 * image, and is large enough to use the full 2G relocation range.
+	 */
+	BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
+	BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);
+
+	if (!kaslr_enabled()) {
+		if (kernel_size < SZ_128M)
+			module_direct_base = kernel_end - SZ_128M;
+		if (kernel_size < SZ_2G)
+			module_plt_base = kernel_end - SZ_2G;
 	} else {
-		/*
-		 * Randomize the module region by setting module_alloc_base to
-		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
-		 * _stext). This guarantees that the resulting region still
-		 * covers [_stext, _etext], and that all relative branches can
-		 * be resolved without veneers unless this region is exhausted
-		 * and we fall back to a larger 2GB window in module_alloc()
-		 * when ARM64_MODULE_PLTS is enabled.
-		 */
-		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
+		u64 min = kernel_start;
+		u64 max = kernel_end;
+
+		if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+			pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
+		} else {
+			module_direct_base = random_bounding_box(SZ_128M, min, max);
+			if (module_direct_base) {
+				min = module_direct_base;
+				max = module_direct_base + SZ_128M;
+			}
+		}
+
+		module_plt_base = random_bounding_box(SZ_2G, min, max);
 	}
 
-	/* use the lower 21 bits to randomize the base of the module region */
-	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
-	module_alloc_base &= PAGE_MASK;
+	pr_info("%llu pages in range for non-PLT usage",
+		module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
+	pr_info("%llu pages in range for PLT usage",
+		module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);
 
 	return 0;
 }
-subsys_initcall(kaslr_module_init)
-#endif
+subsys_initcall(module_init_limits);
 
 void *module_alloc(unsigned long size)
 {
-	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
-	void *p;
+	void *p = NULL;
 
 	/*
 	 * Where possible, prefer to allocate within direct branch range of the
-	 * kernel such that no PLTs are necessary. This may fail, so we pass
-	 * __GFP_NOWARN to silence the resulting warning.
+	 * kernel such that no PLTs are necessary.
 	 */
-	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				 module_alloc_end, GFP_KERNEL | __GFP_NOWARN,
-				 PAGE_KERNEL, 0, NUMA_NO_NODE,
-				 __builtin_return_address(0));
+	if (module_direct_base) {
+		p = __vmalloc_node_range(size, MODULE_ALIGN,
+					 module_direct_base,
+					 module_direct_base + SZ_128M,
+					 GFP_KERNEL | __GFP_NOWARN,
+					 PAGE_KERNEL, 0, NUMA_NO_NODE,
+					 __builtin_return_address(0));
+	}
+
+	if (!p && module_plt_base) {
+		p = __vmalloc_node_range(size, MODULE_ALIGN,
+					 module_plt_base,
+					 module_plt_base + SZ_2G,
+					 GFP_KERNEL | __GFP_NOWARN,
+					 PAGE_KERNEL, 0, NUMA_NO_NODE,
+					 __builtin_return_address(0));
+	}
 
 	if (!p) {
-		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				module_alloc_base + SZ_2G, GFP_KERNEL,
-				PAGE_KERNEL, 0, NUMA_NO_NODE,
-				__builtin_return_address(0));
+		pr_warn_ratelimited("%s: unable to allocate memory\n",
+				    __func__);
 	}
 
 	if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
--
cgit
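[ Illustrative aside, not part of the patch series: a stand-alone C
  sketch of the bounding-box selection performed by module_init_limits()
  above. get_random_u32_inclusive() is replaced with a crude libc
  stand-in, and PAGE_SIZE and the kernel image bounds are assumed values;
  this demonstrates the arithmetic, it is not kernel code. ]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096ULL
#define SZ_128M (128ULL << 20)
#define SZ_2G   (2ULL << 30)

/*
 * Pick a random page-aligned base for a window of 'size' bytes which
 * entirely contains [start, end - 1], mirroring random_bounding_box().
 */
static uint64_t random_bounding_box(uint64_t size, uint64_t start, uint64_t end)
{
	uint64_t max_pgoff, pgoff;

	if ((end - start) >= size)
		return 0;

	max_pgoff = (size - (end - start)) / PAGE_SIZE;
	/* Crude stand-in for get_random_u32_inclusive(0, max_pgoff). */
	pgoff = (uint64_t)rand() % (max_pgoff + 1);

	return start - pgoff * PAGE_SIZE;
}

int main(void)
{
	/* Hypothetical, page-aligned kernel image bounds (~40M image). */
	uint64_t kernel_start = 0xffff800008000000ULL;
	uint64_t kernel_end = kernel_start + (40ULL << 20);
	uint64_t min = kernel_start, max = kernel_end;
	uint64_t direct, plt;

	srand((unsigned int)time(NULL));

	/* Try a 128M direct-branch window first, as module_init_limits() does. */
	direct = random_bounding_box(SZ_128M, min, max);
	if (direct) {
		min = direct;
		max = direct + SZ_128M;
	}

	/* The 2G window must then cover the 128M window (and the image). */
	plt = random_bounding_box(SZ_2G, min, max);

	printf("128M window base: 0x%llx\n", (unsigned long long)direct);
	printf("2G window base:   0x%llx\n", (unsigned long long)plt);
	return 0;
}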