Diffstat (limited to 'arch/x86/kernel/module.c')
| -rw-r--r-- | arch/x86/kernel/module.c | 219 |
1 file changed, 99 insertions, 120 deletions
```diff
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 705fb2a41d7d..11c45ce42694 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -19,6 +19,7 @@
 #include <linux/jump_label.h>
 #include <linux/random.h>
 #include <linux/memory.h>
+#include <linux/stackprotector.h>
 
 #include <asm/text-patching.h>
 #include <asm/page.h>
@@ -36,57 +37,6 @@ do { \
 } while (0)
 #endif
 
-#ifdef CONFIG_RANDOMIZE_BASE
-static unsigned long module_load_offset;
-
-/* Mutex protects the module_load_offset. */
-static DEFINE_MUTEX(module_kaslr_mutex);
-
-static unsigned long int get_module_load_offset(void)
-{
-	if (kaslr_enabled()) {
-		mutex_lock(&module_kaslr_mutex);
-		/*
-		 * Calculate the module_load_offset the first time this
-		 * code is called. Once calculated it stays the same until
-		 * reboot.
-		 */
-		if (module_load_offset == 0)
-			module_load_offset =
-				get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
-		mutex_unlock(&module_kaslr_mutex);
-	}
-	return module_load_offset;
-}
-#else
-static unsigned long int get_module_load_offset(void)
-{
-	return 0;
-}
-#endif
-
-void *module_alloc(unsigned long size)
-{
-	gfp_t gfp_mask = GFP_KERNEL;
-	void *p;
-
-	if (PAGE_ALIGN(size) > MODULES_LEN)
-		return NULL;
-
-	p = __vmalloc_node_range(size, MODULE_ALIGN,
-				 MODULES_VADDR + get_module_load_offset(),
-				 MODULES_END, gfp_mask, PAGE_KERNEL,
-				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
-				 NUMA_NO_NODE, __builtin_return_address(0));
-
-	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
-		vfree(p);
-		return NULL;
-	}
-
-	return p;
-}
-
 #ifdef CONFIG_X86_32
 int apply_relocate(Elf32_Shdr *sechdrs,
 		   const char *strtab,
@@ -129,22 +79,28 @@ int apply_relocate(Elf32_Shdr *sechdrs,
 	return 0;
 }
 #else /*X86_64*/
-static int __apply_relocate_add(Elf64_Shdr *sechdrs,
+static int __write_relocate_add(Elf64_Shdr *sechdrs,
 		   const char *strtab,
 		   unsigned int symindex,
 		   unsigned int relsec,
 		   struct module *me,
-		   void *(*write)(void *dest, const void *src, size_t len))
+		   void *(*write)(void *dest, const void *src, size_t len),
+		   bool apply)
 {
 	unsigned int i;
 	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
 	Elf64_Sym *sym;
 	void *loc;
 	u64 val;
+	u64 zero = 0ULL;
 
-	DEBUGP("Applying relocate section %u to %u\n",
+	DEBUGP("%s relocate section %u to %u\n",
+	       apply ? "Applying" : "Clearing",
 	       relsec, sechdrs[relsec].sh_info);
+
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		size_t size;
+
 		/* This is where to make the change */
 		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
 			+ rel[i].r_offset;
@@ -162,69 +118,83 @@ static int __apply_relocate_add(Elf64_Shdr *sechdrs,
 
 		switch (ELF64_R_TYPE(rel[i].r_info)) {
 		case R_X86_64_NONE:
-			break;
+			continue;	/* nothing to write */
 		case R_X86_64_64:
-			if (*(u64 *)loc != 0)
-				goto invalid_relocation;
-			write(loc, &val, 8);
+			size = 8;
 			break;
 		case R_X86_64_32:
-			if (*(u32 *)loc != 0)
-				goto invalid_relocation;
-			write(loc, &val, 4);
-			if (val != *(u32 *)loc)
+			if (val != *(u32 *)&val)
 				goto overflow;
+			size = 4;
 			break;
 		case R_X86_64_32S:
-			if (*(s32 *)loc != 0)
-				goto invalid_relocation;
-			write(loc, &val, 4);
-			if ((s64)val != *(s32 *)loc)
+			if ((s64)val != *(s32 *)&val)
 				goto overflow;
+			size = 4;
 			break;
+#if defined(CONFIG_STACKPROTECTOR) && \
+    defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 170000
+		case R_X86_64_REX_GOTPCRELX: {
+			static unsigned long __percpu *const addr = &__stack_chk_guard;
+
+			if (sym->st_value != (u64)addr) {
+				pr_err("%s: Unsupported GOTPCREL relocation\n", me->name);
+				return -ENOEXEC;
+			}
+
+			val = (u64)&addr + rel[i].r_addend;
+			fallthrough;
+		}
+#endif
 		case R_X86_64_PC32:
 		case R_X86_64_PLT32:
-			if (*(u32 *)loc != 0)
-				goto invalid_relocation;
 			val -= (u64)loc;
-			write(loc, &val, 4);
-#if 0
-			if ((s64)val != *(s32 *)loc)
-				goto overflow;
-#endif
+			size = 4;
 			break;
 		case R_X86_64_PC64:
-			if (*(u64 *)loc != 0)
-				goto invalid_relocation;
 			val -= (u64)loc;
-			write(loc, &val, 8);
+			size = 8;
 			break;
 		default:
 			pr_err("%s: Unknown rela relocation: %llu\n",
 			       me->name, ELF64_R_TYPE(rel[i].r_info));
 			return -ENOEXEC;
 		}
+
+		if (apply) {
+			if (memcmp(loc, &zero, size)) {
+				pr_err("x86/modules: Invalid relocation target, existing value is nonzero for sec %u, idx %u, type %d, loc %lx, val %llx\n",
+				       relsec, i, (int)ELF64_R_TYPE(rel[i].r_info),
+				       (unsigned long)loc, val);
+				return -ENOEXEC;
+			}
+			write(loc, &val, size);
+		} else {
+			if (memcmp(loc, &val, size)) {
+				pr_warn("x86/modules: Invalid relocation target, existing value does not match expected value for sec %u, idx %u, type %d, loc %lx, val %llx\n",
+					relsec, i, (int)ELF64_R_TYPE(rel[i].r_info),
+					(unsigned long)loc, val);
+				return -ENOEXEC;
+			}
+			write(loc, &zero, size);
+		}
 	}
 	return 0;
 
-invalid_relocation:
-	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
-	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
-	return -ENOEXEC;
-
 overflow:
-	pr_err("overflow in relocation type %d val %Lx\n",
-	       (int)ELF64_R_TYPE(rel[i].r_info), val);
+	pr_err("overflow in relocation type %d val %llx sec %u idx %d\n",
+	       (int)ELF64_R_TYPE(rel[i].r_info), val, relsec, i);
 	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
 	       me->name);
 	return -ENOEXEC;
 }
 
-int apply_relocate_add(Elf64_Shdr *sechdrs,
-		   const char *strtab,
-		   unsigned int symindex,
-		   unsigned int relsec,
-		   struct module *me)
+static int write_relocate_add(Elf64_Shdr *sechdrs,
+			      const char *strtab,
+			      unsigned int symindex,
+			      unsigned int relsec,
			      struct module *me,
+			      bool apply)
 {
 	int ret;
 	bool early = me->state == MODULE_STATE_UNFORMED;
@@ -235,17 +205,37 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		mutex_lock(&text_mutex);
 	}
 
-	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
-				   write);
+	ret = __write_relocate_add(sechdrs, strtab, symindex, relsec, me,
+				   write, apply);
 
 	if (!early) {
-		text_poke_sync();
+		smp_text_poke_sync_each_cpu();
 		mutex_unlock(&text_mutex);
 	}
 
 	return ret;
 }
 
+int apply_relocate_add(Elf64_Shdr *sechdrs,
+		   const char *strtab,
+		   unsigned int symindex,
+		   unsigned int relsec,
+		   struct module *me)
+{
+	return write_relocate_add(sechdrs, strtab, symindex, relsec, me, true);
+}
+
+#ifdef CONFIG_LIVEPATCH
+void clear_relocate_add(Elf64_Shdr *sechdrs,
+			const char *strtab,
+			unsigned int symindex,
+			unsigned int relsec,
+			struct module *me)
+{
+	write_relocate_add(sechdrs, strtab, symindex, relsec, me, false);
+}
+#endif
+
 #endif
 
 int module_finalize(const Elf_Ehdr *hdr,
@@ -253,7 +243,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    struct module *me)
 {
 	const Elf_Shdr *s, *alt = NULL, *locks = NULL,
-		*para = NULL, *orc = NULL, *orc_ip = NULL,
+		*orc = NULL, *orc_ip = NULL,
 		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
 		*calls = NULL, *cfi = NULL;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
@@ -263,8 +253,6 @@ int module_finalize(const Elf_Ehdr *hdr,
 			alt = s;
 		if (!strcmp(".smp_locks", secstrings + s->sh_name))
 			locks = s;
-		if (!strcmp(".parainstructions", secstrings + s->sh_name))
-			para = s;
 		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
 			orc = s;
 		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
@@ -281,14 +269,8 @@ int module_finalize(const Elf_Ehdr *hdr,
 			ibt_endbr = s;
 	}
 
-	/*
-	 * See alternative_instructions() for the ordering rules between the
-	 * various patching types.
-	 */
-	if (para) {
-		void *pseg = (void *)para->sh_addr;
-		apply_paravirt(pseg, pseg + para->sh_size);
-	}
+	its_init_mod(me);
+
 	if (retpolines || cfi) {
 		void *rseg = NULL, *cseg = NULL;
 		unsigned int rsize = 0, csize = 0;
@@ -309,38 +291,34 @@ int module_finalize(const Elf_Ehdr *hdr,
 		void *rseg = (void *)retpolines->sh_addr;
 		apply_retpolines(rseg, rseg + retpolines->sh_size);
 	}
+
+	its_fini_mod(me);
+
 	if (returns) {
 		void *rseg = (void *)returns->sh_addr;
 		apply_returns(rseg, rseg + returns->sh_size);
 	}
-	if (alt) {
-		/* patch .altinstructions */
-		void *aseg = (void *)alt->sh_addr;
-		apply_alternatives(aseg, aseg + alt->sh_size);
-	}
-	if (calls || para) {
+	if (calls) {
 		struct callthunk_sites cs = {};
 
-		if (calls) {
-			cs.call_start = (void *)calls->sh_addr;
-			cs.call_end = (void *)calls->sh_addr + calls->sh_size;
-		}
-
-		if (para) {
-			cs.pv_start = (void *)para->sh_addr;
-			cs.pv_end = (void *)para->sh_addr + para->sh_size;
-		}
+		cs.call_start = (void *)calls->sh_addr;
+		cs.call_end = (void *)calls->sh_addr + calls->sh_size;
 
 		callthunks_patch_module_calls(&cs, me);
 	}
+	if (alt) {
+		/* patch .altinstructions */
+		void *aseg = (void *)alt->sh_addr;
+		apply_alternatives(aseg, aseg + alt->sh_size);
+	}
 	if (ibt_endbr) {
 		void *iseg = (void *)ibt_endbr->sh_addr;
-		apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
+		apply_seal_endbr(iseg, iseg + ibt_endbr->sh_size);
 	}
 	if (locks) {
 		void *lseg = (void *)locks->sh_addr;
-		void *text = me->core_layout.base;
-		void *text_end = text + me->core_layout.text_size;
+		void *text = me->mem[MOD_TEXT].base;
+		void *text_end = text + me->mem[MOD_TEXT].size;
 		alternatives_smp_module_add(me, me->name,
 					    lseg, lseg + locks->sh_size,
 					    text, text_end);
@@ -356,4 +334,5 @@ int module_finalize(const Elf_Ehdr *hdr,
 void module_arch_cleanup(struct module *mod)
 {
 	alternatives_smp_module_del(mod);
+	its_free_mod(mod);
 }
```
