Diffstat (limited to 'arch/powerpc/kernel/module_64.c')
| -rw-r--r-- | arch/powerpc/kernel/module_64.c | 904 |
1 file changed, 637 insertions, 267 deletions
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 0b0f89685b67..2a44bc8e2439 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -1,19 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* Kernel module help for PPC64. Copyright (C) 2001, 2003 Rusty Russell IBM Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -26,12 +14,14 @@ #include <linux/ftrace.h> #include <linux/bug.h> #include <linux/uaccess.h> +#include <linux/kernel.h> #include <asm/module.h> #include <asm/firmware.h> -#include <asm/code-patching.h> +#include <asm/text-patching.h> #include <linux/sort.h> #include <asm/setup.h> #include <asm/sections.h> +#include <asm/inst.h> /* FIXME: We don't do .init separately. To do this, we'd need to have a separate r2 value in the init and core section, and stub between @@ -41,22 +31,25 @@ this, and makes other things simpler. Anton? --RR. */ -#ifdef PPC64_ELF_ABI_v2 +bool module_elf_check_arch(Elf_Ehdr *hdr) +{ + unsigned long abi_level = hdr->e_flags & 0x3; + + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2)) + return abi_level == 2; + else + return abi_level < 2; +} -/* An address is simply the address of the function. */ -typedef unsigned long func_desc_t; +#ifdef CONFIG_PPC64_ELF_ABI_V2 static func_desc_t func_desc(unsigned long addr) { - return addr; -} -static unsigned long func_addr(unsigned long addr) -{ - return addr; -} -static unsigned long stub_func_addr(func_desc_t func) -{ - return func; + func_desc_t desc = { + .addr = addr, + }; + + return desc; } /* PowerPC64 specific values for the Elf64_Sym st_other field. */ @@ -74,111 +67,99 @@ static unsigned int local_entry_offset(const Elf64_Sym *sym) } #else -/* An address is address of the OPD entry, which contains address of fn. */ -typedef struct ppc64_opd_entry func_desc_t; - static func_desc_t func_desc(unsigned long addr) { - return *(struct ppc64_opd_entry *)addr; + return *(struct func_desc *)addr; } -static unsigned long func_addr(unsigned long addr) +static unsigned int local_entry_offset(const Elf64_Sym *sym) { - return func_desc(addr).funcaddr; + return 0; } -static unsigned long stub_func_addr(func_desc_t func) + +void *dereference_module_function_descriptor(struct module *mod, void *ptr) { - return func.funcaddr; + if (ptr < (void *)mod->arch.start_opd || + ptr >= (void *)mod->arch.end_opd) + return ptr; + + return dereference_function_descriptor(ptr); } -static unsigned int local_entry_offset(const Elf64_Sym *sym) +#endif + +static unsigned long func_addr(unsigned long addr) { - return 0; + return func_desc(addr).addr; +} + +static unsigned long stub_func_addr(func_desc_t func) +{ + return func.addr; } -#endif #define STUB_MAGIC 0x73747562 /* stub */ /* Like PPC32, we need little trampolines to do > 24-bit jumps (into the kernel itself). 
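The new module_elf_check_arch() above keys off the low two bits of the ELF header's e_flags, which carry the ABI level (0 or 1 for ELFv1 objects, 2 for ELFv2). A minimal standalone sketch of that test, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the e_flags ABI check: a V2 kernel accepts only level-2
 * objects, a V1 kernel accepts levels 0 and 1. */
static bool elf_abi_ok(uint64_t e_flags, bool kernel_is_elfv2)
{
    unsigned long abi_level = e_flags & 0x3;

    return kernel_is_elfv2 ? abi_level == 2 : abi_level < 2;
}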
But on PPC64, these need to be used for every jump, actually, to reset r2 (TOC+0x8000). */ -struct ppc64_stub_entry -{ - /* 28 byte jump instruction sequence (7 instructions). We only - * need 6 instructions on ABIv2 but we always allocate 7 so - * so we don't have to modify the trampoline load instruction. */ +struct ppc64_stub_entry { + /* + * 28 byte jump instruction sequence (7 instructions) that can + * hold ppc64_stub_insns or stub_insns. Must be 8-byte aligned + * with PCREL kernels that use prefix instructions in the stub. + */ u32 jump[7]; /* Used by ftrace to identify stubs */ u32 magic; /* Data for the above code */ func_desc_t funcdata; +} __aligned(8); + +struct ppc64_got_entry { + u64 addr; }; /* * PPC64 uses 24 bit jumps, but we need to jump into other modules or * the kernel which may be further. So we jump to a stub. * - * For ELFv1 we need to use this to set up the new r2 value (aka TOC - * pointer). For ELFv2 it's the callee's responsibility to set up the - * new r2, but for both we need to save the old r2. + * Target address and TOC are loaded from function descriptor in the + * ppc64_stub_entry. * - * We could simply patch the new r2 value and function pointer into - * the stub, but it's significantly shorter to put these values at the - * end of the stub code, and patch the stub address (32-bits relative - * to the TOC ptr, r2) into the stub. + * r12 is used to generate the target address, which is required for the + * ELFv2 global entry point calling convention. + * + * TOC handling: + * - PCREL does not have a TOC. + * - ELFv2 non-PCREL just has to save r2, the callee is responsible for + * setting its own TOC pointer at the global entry address. + * - ELFv1 must load the new TOC pointer from the function descriptor. */ - static u32 ppc64_stub_insns[] = { - 0x3d620000, /* addis r11,r2, <high> */ - 0x396b0000, /* addi r11,r11, <low> */ +#ifdef CONFIG_PPC_KERNEL_PCREL + /* pld r12,addr */ + PPC_PREFIX_8LS | __PPC_PRFX_R(1), + PPC_INST_PLD | ___PPC_RT(_R12), +#else + PPC_RAW_ADDIS(_R11, _R2, 0), + PPC_RAW_ADDI(_R11, _R11, 0), /* Save current r2 value in magic place on the stack. 
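The fixed 32-byte offset the stub code relies on (see the ld r12,32(r11) below) falls straight out of this layout: seven instruction words plus the magic word put funcdata at byte 32. A compile-time sketch, assuming the 16-byte ELFv1 descriptor:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Mock of the stub layout: 7 instruction words, a magic word, then
 * the function descriptor, so funcdata lands at byte 32. */
struct func_desc { uint64_t addr; uint64_t toc; };

struct stub_entry {
    uint32_t jump[7];
    uint32_t magic;
    struct func_desc funcdata;
} __attribute__((aligned(8)));

static_assert(offsetof(struct stub_entry, funcdata) == 32,
          "stub code loads its target from a fixed 32-byte offset");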
*/ - 0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */ - 0xe98b0020, /* ld r12,32(r11) */ -#ifdef PPC64_ELF_ABI_v1 + PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET), + PPC_RAW_LD(_R12, _R11, 32), +#ifdef CONFIG_PPC64_ELF_ABI_V1 /* Set up new r2 from function descriptor */ - 0xe84b0028, /* ld r2,40(r11) */ + PPC_RAW_LD(_R2, _R11, 40), #endif - 0x7d8903a6, /* mtctr r12 */ - 0x4e800420 /* bctr */ -}; - -#ifdef CONFIG_DYNAMIC_FTRACE -int module_trampoline_target(struct module *mod, unsigned long addr, - unsigned long *target) -{ - struct ppc64_stub_entry *stub; - func_desc_t funcdata; - u32 magic; - - if (!within_module_core(addr, mod)) { - pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name); - return -EFAULT; - } - - stub = (struct ppc64_stub_entry *)addr; - - if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) { - pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name); - return -EFAULT; - } - - if (magic != STUB_MAGIC) { - pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name); - return -EFAULT; - } - - if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) { - pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name); - return -EFAULT; - } - - *target = stub_func_addr(funcdata); - - return 0; -} #endif + PPC_RAW_MTCTR(_R12), + PPC_RAW_BCTR(), +}; -/* Count how many different 24-bit relocations (different symbol, - different addend) */ -static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num) +/* + * Count how many different r_type relocations (different symbol, + * different addend). + */ +static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num, + unsigned long r_type) { unsigned int i, r_info, r_addend, _count_relocs; @@ -187,8 +168,8 @@ static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num) r_info = 0; r_addend = 0; for (i = 0; i < num; i++) - /* Only count 24-bit relocs, others don't need stubs */ - if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 && + /* Only count r_type relocs, others don't need stubs */ + if (ELF64_R_TYPE(rela[i].r_info) == r_type && (r_info != ELF64_R_SYM(rela[i].r_info) || r_addend != rela[i].r_addend)) { _count_relocs++; @@ -222,27 +203,13 @@ static int relacmp(const void *_x, const void *_y) return 0; } -static void relaswap(void *_x, void *_y, int size) -{ - uint64_t *x, *y, tmp; - int i; - - y = (uint64_t *)_x; - x = (uint64_t *)_y; - - for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) { - tmp = x[i]; - x[i] = y[i]; - y[i] = tmp; - } -} - /* Get size of potential trampolines required. */ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, - const Elf64_Shdr *sechdrs) + const Elf64_Shdr *sechdrs, + char *secstrings, + struct module *me) { - /* One extra reloc so it's always 0-funcaddr terminated */ - unsigned long relocs = 1; + unsigned long relocs = 0; unsigned i; /* Every relocated section... */ @@ -255,28 +222,136 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, /* Sort the relocation information based on a symbol and * addend key. 
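count_relocs() now takes the relocation type as a parameter, but the counting idea is unchanged: once the entries are sorted, distinct (symbol, addend) pairs fall out of a single linear pass. A userspace sketch of that pass:

#include <stddef.h>
#include <stdint.h>

/* Toy version of the dedup in count_relocs(): the input is assumed
 * sorted by (sym, addend), so equal keys are adjacent. */
struct rel { uint32_t sym; int64_t addend; };

static size_t count_distinct(const struct rel *r, size_t n)
{
    size_t count = 0;
    uint32_t last_sym = 0;
    int64_t last_addend = 0;

    for (size_t i = 0; i < n; i++) {
        if (count == 0 || r[i].sym != last_sym ||
            r[i].addend != last_addend) {
            count++;
            last_sym = r[i].sym;
            last_addend = r[i].addend;
        }
    }
    return count;
}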
This is a stable O(n*log n) complexity - * alogrithm but it will reduce the complexity of + * algorithm but it will reduce the complexity of * count_relocs() to linear complexity O(n) */ sort((void *)sechdrs[i].sh_addr, sechdrs[i].sh_size / sizeof(Elf64_Rela), - sizeof(Elf64_Rela), relacmp, relaswap); + sizeof(Elf64_Rela), relacmp, NULL); relocs += count_relocs((void *)sechdrs[i].sh_addr, sechdrs[i].sh_size - / sizeof(Elf64_Rela)); + / sizeof(Elf64_Rela), + R_PPC_REL24); +#ifdef CONFIG_PPC_KERNEL_PCREL + relocs += count_relocs((void *)sechdrs[i].sh_addr, + sechdrs[i].sh_size + / sizeof(Elf64_Rela), + R_PPC64_REL24_NOTOC); +#endif } } -#ifdef CONFIG_DYNAMIC_FTRACE - /* make the trampoline to the ftrace_caller */ - relocs++; + /* stubs for ftrace_caller and ftrace_regs_caller */ + relocs += IS_ENABLED(CONFIG_DYNAMIC_FTRACE) + IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS); + +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + /* stubs for the function tracer */ + for (i = 1; i < hdr->e_shnum; i++) { + if (!strcmp(secstrings + sechdrs[i].sh_name, "__patchable_function_entries")) { + me->arch.ool_stub_count = sechdrs[i].sh_size / sizeof(unsigned long); + me->arch.ool_stub_index = 0; + relocs += roundup(me->arch.ool_stub_count * sizeof(struct ftrace_ool_stub), + sizeof(struct ppc64_stub_entry)) / + sizeof(struct ppc64_stub_entry); + break; + } + } #endif pr_debug("Looks like a total of %lu stubs, max\n", relocs); return relocs * sizeof(struct ppc64_stub_entry); } +#ifdef CONFIG_PPC_KERNEL_PCREL +static int count_pcpu_relocs(const Elf64_Shdr *sechdrs, + const Elf64_Rela *rela, unsigned int num, + unsigned int symindex, unsigned int pcpu) +{ + unsigned int i, r_info, r_addend, _count_relocs; + + _count_relocs = 0; + r_info = 0; + r_addend = 0; + + for (i = 0; i < num; i++) { + Elf64_Sym *sym; + + /* This is the symbol it is referring to */ + sym = (Elf64_Sym *)sechdrs[symindex].sh_addr + + ELF64_R_SYM(rela[i].r_info); + + if (sym->st_shndx == pcpu && + (r_info != ELF64_R_SYM(rela[i].r_info) || + r_addend != rela[i].r_addend)) { + _count_relocs++; + r_info = ELF64_R_SYM(rela[i].r_info); + r_addend = rela[i].r_addend; + } + } + + return _count_relocs; +} + +/* Get size of potential GOT required. */ +static unsigned long get_got_size(const Elf64_Ehdr *hdr, + const Elf64_Shdr *sechdrs, + struct module *me) +{ + /* One extra reloc so it's always 0-addr terminated */ + unsigned long relocs = 1; + unsigned int i, symindex = 0; + + for (i = 1; i < hdr->e_shnum; i++) { + if (sechdrs[i].sh_type == SHT_SYMTAB) { + symindex = i; + break; + } + } + WARN_ON_ONCE(!symindex); + + /* Every relocated section... */ + for (i = 1; i < hdr->e_shnum; i++) { + if (sechdrs[i].sh_type == SHT_RELA) { + pr_debug("Found relocations in section %u\n", i); + pr_debug("Ptr: %p. Number: %llu\n", (void *)sechdrs[i].sh_addr, + sechdrs[i].sh_size / sizeof(Elf64_Rela)); + + /* + * Sort the relocation information based on a symbol and + * addend key. This is a stable O(n*log n) complexity + * algorithm but it will reduce the complexity of + * count_relocs() to linear complexity O(n) + */ + sort((void *)sechdrs[i].sh_addr, + sechdrs[i].sh_size / sizeof(Elf64_Rela), + sizeof(Elf64_Rela), relacmp, NULL); + + relocs += count_relocs((void *)sechdrs[i].sh_addr, + sechdrs[i].sh_size + / sizeof(Elf64_Rela), + R_PPC64_GOT_PCREL34); + + /* + * Percpu data access typically gets linked with + * REL34 relocations, but the percpu section gets + * moved at load time and requires that to be + * converted to GOT linkage. 
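Passing NULL as the swap callback (replacing the deleted relaswap()) lets the kernel's sort() fall back to its generic swap. The comparator itself is an ordinary two-key compare; a userspace analogue built on qsort():

#include <stdint.h>
#include <stdlib.h>

/* Analogue of relacmp(): order by the r_info (symbol/type) word,
 * then by addend, so duplicates become adjacent for the count. */
struct rela { uint64_t r_offset; uint64_t r_info; int64_t r_addend; };

static int relacmp(const void *_x, const void *_y)
{
    const struct rela *x = _x, *y = _y;

    if (x->r_info != y->r_info)
        return x->r_info < y->r_info ? -1 : 1;
    if (x->r_addend != y->r_addend)
        return x->r_addend < y->r_addend ? -1 : 1;
    return 0;
}

/* usage: qsort(relas, nrelas, sizeof(relas[0]), relacmp); */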
+ */ + if (IS_ENABLED(CONFIG_SMP) && symindex) + relocs += count_pcpu_relocs(sechdrs, + (void *)sechdrs[i].sh_addr, + sechdrs[i].sh_size + / sizeof(Elf64_Rela), + symindex, me->arch.pcpu_section); + } + } + + pr_debug("Looks like a total of %lu GOT entries, max\n", relocs); + return relocs * sizeof(struct ppc64_got_entry); +} +#else /* CONFIG_PPC_KERNEL_PCREL */ + /* Still needed for ELFv2, for .TOC. */ static void dedotify_versions(struct modversion_info *vers, unsigned long size) @@ -289,6 +364,24 @@ static void dedotify_versions(struct modversion_info *vers, } } +/* Same as normal versions, remove a leading dot if present. */ +static void dedotify_ext_version_names(char *str_seq, unsigned long size) +{ + unsigned long out = 0; + unsigned long in; + char last = '\0'; + + for (in = 0; in < size; in++) { + /* Skip one leading dot */ + if (last == '\0' && str_seq[in] == '.') + in++; + last = str_seq[in]; + str_seq[out++] = last; + } + /* Zero the trailing portion of the names table for robustness */ + memset(&str_seq[out], 0, size - out); +} + /* * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC. * seem to be defined (value set later). @@ -326,6 +419,13 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs, } return NULL; } +#endif /* CONFIG_PPC_KERNEL_PCREL */ + +bool module_init_section(const char *name) +{ + /* We don't handle .init for the moment: always return false. */ + return false; +} int module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs, @@ -336,24 +436,34 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, /* Find .toc and .stubs sections, symtab and strtab */ for (i = 1; i < hdr->e_shnum; i++) { - char *p; if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0) me->arch.stubs_section = i; - else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0) +#ifdef CONFIG_PPC_KERNEL_PCREL + else if (strcmp(secstrings + sechdrs[i].sh_name, ".data..percpu") == 0) + me->arch.pcpu_section = i; + else if (strcmp(secstrings + sechdrs[i].sh_name, ".mygot") == 0) { + me->arch.got_section = i; + if (sechdrs[i].sh_addralign < 8) + sechdrs[i].sh_addralign = 8; + } +#else + else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0) { me->arch.toc_section = i; - else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0) + if (sechdrs[i].sh_addralign < 8) + sechdrs[i].sh_addralign = 8; + } else if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0) dedotify_versions((void *)hdr + sechdrs[i].sh_offset, sechdrs[i].sh_size); - - /* We don't handle .init for the moment: rename to _init */ - while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init"))) - p[0] = '_'; + else if (strcmp(secstrings + sechdrs[i].sh_name, "__version_ext_names") == 0) + dedotify_ext_version_names((void *)hdr + sechdrs[i].sh_offset, + sechdrs[i].sh_size); if (sechdrs[i].sh_type == SHT_SYMTAB) dedotify((void *)hdr + sechdrs[i].sh_offset, sechdrs[i].sh_size / sizeof(Elf64_Sym), (void *)hdr + sechdrs[sechdrs[i].sh_link].sh_offset); +#endif } if (!me->arch.stubs_section) { @@ -361,56 +471,221 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, return -ENOEXEC; } +#ifdef CONFIG_PPC_KERNEL_PCREL + if (!me->arch.got_section) { + pr_err("%s: doesn't contain .mygot.\n", me->name); + return -ENOEXEC; + } + + /* Override the got size */ + sechdrs[me->arch.got_section].sh_size = get_got_size(hdr, sechdrs, me); +#else /* If we don't have a .toc, just use .stubs. 
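The new dedotify_ext_version_names() walks a buffer of consecutive NUL-terminated names and drops one leading dot from each, zeroing the freed tail. A self-contained copy of the same logic that can be exercised in userspace:

#include <stdio.h>
#include <string.h>

/* Copy of the leading-dot stripper: a '.' at the start of the buffer
 * or right after a terminating NUL is skipped. */
static void dedotify_names(char *str_seq, unsigned long size)
{
    unsigned long out = 0, in;
    char last = '\0';

    for (in = 0; in < size; in++) {
        if (last == '\0' && str_seq[in] == '.')
            in++;
        last = str_seq[in];
        str_seq[out++] = last;
    }
    memset(&str_seq[out], 0, size - out);
}

int main(void)
{
    char names[] = ".foo\0.bar\0";

    dedotify_names(names, sizeof(names));
    printf("%s %s\n", names, names + strlen(names) + 1); /* foo bar */
    return 0;
}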
We need to set r2 to some reasonable value in case the module calls out to other functions via a stub, or if a function pointer escapes the module by some means. */ if (!me->arch.toc_section) me->arch.toc_section = me->arch.stubs_section; +#endif /* Override the stubs size */ - sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs); + sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs, secstrings, me); + return 0; } -/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this - gives the value maximum span in an instruction which uses a signed - offset) */ -static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me) +#if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY) + +static u32 stub_insns[] = { +#ifdef CONFIG_PPC_KERNEL_PCREL + PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)), + PPC_RAW_NOP(), /* align the prefix insn */ + /* paddi r12,r12,addr */ + PPC_PREFIX_MLS | __PPC_PRFX_R(0), + PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12), + PPC_RAW_MTCTR(_R12), + PPC_RAW_BCTR(), +#else + PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)), + PPC_RAW_ADDIS(_R12, _R12, 0), + PPC_RAW_ADDI(_R12, _R12, 0), + PPC_RAW_MTCTR(_R12), + PPC_RAW_BCTR(), +#endif +}; + +/* + * For mprofile-kernel we use a special stub for ftrace_caller() because we + * can't rely on r2 containing this module's TOC when we enter the stub. + * + * That can happen if the function calling us didn't need to use the toc. In + * that case it won't have setup r2, and the r2 value will be either the + * kernel's toc, or possibly another modules toc. + * + * To deal with that this stub uses the kernel toc, which is always accessible + * via the paca (in r13). The target (ftrace_caller()) is responsible for + * saving and restoring the toc before returning. + */ +static inline int create_ftrace_stub(struct ppc64_stub_entry *entry, + unsigned long addr, + struct module *me) { - return sechdrs[me->arch.toc_section].sh_addr + 0x8000; + long reladdr; + + if ((unsigned long)entry->jump % 8 != 0) { + pr_err("%s: Address of stub entry is not 8-byte aligned\n", me->name); + return 0; + } + + BUILD_BUG_ON(sizeof(stub_insns) > sizeof(entry->jump)); + memcpy(entry->jump, stub_insns, sizeof(stub_insns)); + + if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) { + /* Stub uses address relative to kernel base (from the paca) */ + reladdr = addr - local_paca->kernelbase; + if (reladdr > 0x1FFFFFFFFL || reladdr < -0x200000000L) { + pr_err("%s: Address of %ps out of range of 34-bit relative address.\n", + me->name, (void *)addr); + return 0; + } + + entry->jump[2] |= IMM_H18(reladdr); + entry->jump[3] |= IMM_L(reladdr); + } else { + /* Stub uses address relative to kernel toc (from the paca) */ + reladdr = addr - kernel_toc_addr(); + if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { + pr_err("%s: Address of %ps out of range of kernel_toc.\n", + me->name, (void *)addr); + return 0; + } + + entry->jump[1] |= PPC_HA(reladdr); + entry->jump[2] |= PPC_LO(reladdr); + } + + /* Even though we don't use funcdata in the stub, it's needed elsewhere. 
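In the PCREL arm of create_ftrace_stub(), the patched prefixed instruction takes a 34-bit signed displacement split as 18 high bits in the prefix word and 16 low bits in the suffix. A sketch of the check and split (the macros are restated locally for illustration; the kernel provides its own IMM_H18/IMM_L):

#include <stdbool.h>
#include <stdint.h>

#define IMM_H18(v)  (((v) >> 16) & 0x3ffff)
#define IMM_L(v)    ((v) & 0xffff)

/* A 34-bit signed displacement fits iff it lies within
 * [-0x200000000, 0x1FFFFFFFF], matching the range check above. */
static bool fits_pcrel34(long reladdr)
{
    return reladdr <= 0x1FFFFFFFFL && reladdr >= -0x200000000L;
}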
*/ + entry->funcdata = func_desc(addr); + entry->magic = STUB_MAGIC; + + return 1; +} + +static bool is_mprofile_ftrace_call(const char *name) +{ + if (!strcmp("_mcount", name)) + return true; +#ifdef CONFIG_DYNAMIC_FTRACE + if (!strcmp("ftrace_caller", name)) + return true; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + if (!strcmp("ftrace_regs_caller", name)) + return true; +#endif +#endif + + return false; +} +#else +static inline int create_ftrace_stub(struct ppc64_stub_entry *entry, + unsigned long addr, + struct module *me) +{ + return 0; } -/* Both low and high 16 bits are added as SIGNED additions, so if low - 16 bits has high bit set, high 16 bits must be adjusted. These - macros do that (stolen from binutils). */ -#define PPC_LO(v) ((v) & 0xffff) -#define PPC_HI(v) (((v) >> 16) & 0xffff) -#define PPC_HA(v) PPC_HI ((v) + 0x8000) +static bool is_mprofile_ftrace_call(const char *name) +{ + return false; +} +#endif + +/* + * r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives the + * value maximum span in an instruction which uses a signed offset). Round down + * to a 256 byte boundary for the odd case where we are setting up r2 without a + * .toc section. + */ +static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me) +{ +#ifndef CONFIG_PPC_KERNEL_PCREL + return (sechdrs[me->arch.toc_section].sh_addr & ~0xfful) + 0x8000; +#else + return -1; +#endif +} /* Patch stub to reference function and correct r2 value. */ static inline int create_stub(const Elf64_Shdr *sechdrs, struct ppc64_stub_entry *entry, unsigned long addr, - struct module *me) + struct module *me, + const char *name) { long reladdr; + func_desc_t desc; + int i; - memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns)); + if (is_mprofile_ftrace_call(name)) + return create_ftrace_stub(entry, addr, me); - /* Stub uses address relative to r2. */ - reladdr = (unsigned long)entry - my_r2(sechdrs, me); - if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { - pr_err("%s: Address %p of stub out of range of %p.\n", - me->name, (void *)reladdr, (void *)my_r2); + if ((unsigned long)entry->jump % 8 != 0) { + pr_err("%s: Address of stub entry is not 8-byte aligned\n", me->name); return 0; } - pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr); - entry->jump[0] |= PPC_HA(reladdr); - entry->jump[1] |= PPC_LO(reladdr); - entry->funcdata = func_desc(addr); - entry->magic = STUB_MAGIC; + BUILD_BUG_ON(sizeof(ppc64_stub_insns) > sizeof(entry->jump)); + for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) { + if (patch_instruction(&entry->jump[i], + ppc_inst(ppc64_stub_insns[i]))) + return 0; + } + + if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) { + /* Stub uses address relative to itself! */ + reladdr = 0 + offsetof(struct ppc64_stub_entry, funcdata); + BUILD_BUG_ON(reladdr != 32); + if (reladdr > 0x1FFFFFFFFL || reladdr < -0x200000000L) { + pr_err("%s: Address of %p out of range of 34-bit relative address.\n", + me->name, (void *)reladdr); + return 0; + } + pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr); + + /* May not even need this if we're relative to 0 */ + if (patch_instruction(&entry->jump[0], + ppc_inst_prefix(entry->jump[0] | IMM_H18(reladdr), + entry->jump[1] | IMM_L(reladdr)))) + return 0; + + } else { + /* Stub uses address relative to r2. 
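The non-PCREL path still uses the classic high-adjusted split (the PPC_LO/PPC_HI/PPC_HA macros removed from this file; they live in a shared header now): because addi sign-extends the low half, the high half is pre-biased by 0x8000. A quick userspace check that the split round-trips, using the macro definitions from the removed lines:

#include <assert.h>
#include <stdint.h>

#define PPC_LO(v)   ((v) & 0xffff)
#define PPC_HI(v)   (((v) >> 16) & 0xffff)
#define PPC_HA(v)   PPC_HI((v) + 0x8000)

int main(void)
{
    for (int32_t v = -0x7fffff; v <= 0x7fffff; v += 4099) {
        /* addis places HA in the high half, addi then adds the
         * sign-extended LO: together they rebuild v. */
        int32_t rebuilt = (int32_t)((uint32_t)PPC_HA(v) << 16) +
                  (int16_t)PPC_LO(v);
        assert(rebuilt == v);
    }
    return 0;
}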
*/ + reladdr = (unsigned long)entry - my_r2(sechdrs, me); + if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { + pr_err("%s: Address %p of stub out of range of %p.\n", + me->name, (void *)reladdr, (void *)my_r2); + return 0; + } + pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr); + + if (patch_instruction(&entry->jump[0], + ppc_inst(entry->jump[0] | PPC_HA(reladdr)))) + return 0; + + if (patch_instruction(&entry->jump[1], + ppc_inst(entry->jump[1] | PPC_LO(reladdr)))) + return 0; + } + + // func_desc_t is 8 bytes if ABIv2, else 16 bytes + desc = func_desc(addr); + for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) { + if (patch_u32(((u32 *)&entry->funcdata) + i, ((u32 *)&desc)[i])) + return 0; + } + + if (patch_u32(&entry->magic, STUB_MAGIC)) + return 0; return 1; } @@ -419,7 +694,8 @@ static inline int create_stub(const Elf64_Shdr *sechdrs, stub to set up the TOC ptr (r2) for the function. */ static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs, unsigned long addr, - struct module *me) + struct module *me, + const char *name) { struct ppc64_stub_entry *stubs; unsigned int i, num_stubs; @@ -428,75 +704,90 @@ static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs, /* Find this stub, or if that fails, the next avail. entry */ stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr; - for (i = 0; stub_func_addr(stubs[i].funcdata); i++) { - BUG_ON(i >= num_stubs); + for (i = 0; i < me->arch.stub_count; i++) { + if (WARN_ON(i >= num_stubs)) + return 0; if (stub_func_addr(stubs[i].funcdata) == func_addr(addr)) return (unsigned long)&stubs[i]; } - if (!create_stub(sechdrs, &stubs[i], addr, me)) + if (!create_stub(sechdrs, &stubs[i], addr, me, name)) return 0; + me->arch.stub_count++; return (unsigned long)&stubs[i]; } -#ifdef CC_USING_MPROFILE_KERNEL -static bool is_early_mcount_callsite(u32 *instruction) +#ifdef CONFIG_PPC_KERNEL_PCREL +/* Create GOT to load the location described in this ptr */ +static unsigned long got_for_addr(const Elf64_Shdr *sechdrs, + unsigned long addr, + struct module *me, + const char *name) { - /* - * Check if this is one of the -mprofile-kernel sequences. - */ - if (instruction[-1] == PPC_INST_STD_LR && - instruction[-2] == PPC_INST_MFLR) - return true; + struct ppc64_got_entry *got; + unsigned int i, num_got; - if (instruction[-1] == PPC_INST_MFLR) - return true; + if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) + return addr; - return false; -} + num_got = sechdrs[me->arch.got_section].sh_size / sizeof(*got); -/* - * In case of _mcount calls, do not save the current callee's TOC (in r2) into - * the original caller's stack frame. If we did we would clobber the saved TOC - * value of the original caller. - */ -static void squash_toc_save_inst(const char *name, unsigned long addr) -{ - struct ppc64_stub_entry *stub = (struct ppc64_stub_entry *)addr; + /* Find this stub, or if that fails, the next avail. 
entry */ + got = (void *)sechdrs[me->arch.got_section].sh_addr; + for (i = 0; got[i].addr; i++) { + if (WARN_ON(i >= num_got)) + return 0; - /* Only for calls to _mcount */ - if (strcmp("_mcount", name) != 0) - return; + if (got[i].addr == addr) + return (unsigned long)&got[i]; + } - stub->jump[2] = PPC_INST_NOP; -} -#else -static void squash_toc_save_inst(const char *name, unsigned long addr) { } + got[i].addr = addr; -/* without -mprofile-kernel, mcount calls are never early */ -static bool is_early_mcount_callsite(u32 *instruction) -{ - return false; + return (unsigned long)&got[i]; } #endif /* We expect a noop next: if it is, replace it with instruction to restore r2. */ -static int restore_r2(u32 *instruction, struct module *me) +static int restore_r2(const char *name, u32 *instruction, struct module *me) { - if (is_early_mcount_callsite(instruction - 1)) - return 1; + u32 *prev_insn = instruction - 1; + u32 insn_val = *instruction; + + if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) + return 0; + + if (is_mprofile_ftrace_call(name)) + return 0; + + /* + * Make sure the branch isn't a sibling call. Sibling calls aren't + * "link" branches and they don't return, so they don't need the r2 + * restore afterwards. + */ + if (!instr_is_relative_link_branch(ppc_inst(*prev_insn))) + return 0; - if (*instruction != PPC_INST_NOP) { - pr_err("%s: Expect noop after relocate, got %08x\n", - me->name, *instruction); + /* + * For livepatch, the restore r2 instruction might have already been + * written previously, if the referenced symbol is in a previously + * unloaded module which is now being loaded again. In that case, skip + * the warning and the instruction write. + */ + if (insn_val == PPC_INST_LD_TOC) return 0; + + if (insn_val != PPC_RAW_NOP()) { + pr_err("%s: Expected nop after call, got %08x at %pS\n", + me->name, insn_val, instruction); + return -ENOEXEC; } + /* ld r2,R2_STACK_OFFSET(r1) */ - *instruction = PPC_INST_LD_TOC; - return 1; + return patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC)); } int apply_relocate_add(Elf64_Shdr *sechdrs, @@ -514,6 +805,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, pr_debug("Applying ADD relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); +#ifndef CONFIG_PPC_KERNEL_PCREL /* First time we're called, we can fix up .TOC. 
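got_for_addr() is a find-or-allocate over a zero-terminated array, the same shape as the stub search above it. In outline, with the bounds handling simplified:

#include <stddef.h>
#include <stdint.h>

struct got_entry { uint64_t addr; };

/* Reuse the slot that already holds addr, else claim the first empty
 * one. The table is sized with a spare entry, so the scan is always
 * zero-terminated. */
static struct got_entry *got_slot(struct got_entry *got,
                  unsigned int num_got, uint64_t addr)
{
    unsigned int i;

    for (i = 0; got[i].addr; i++) {
        if (i >= num_got)
            return NULL;    /* table overflow */
        if (got[i].addr == addr)
            return &got[i];
    }
    got[i].addr = addr;
    return &got[i];
}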
*/ if (!me->arch.toc_fixed) { sym = find_dot_toc(sechdrs, strtab, symindex); @@ -523,7 +815,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, sym->st_value = my_r2(sechdrs, me); me->arch.toc_fixed = true; } - +#endif for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr @@ -551,6 +843,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, *(unsigned long *)location = value; break; +#ifndef CONFIG_PPC_KERNEL_PCREL case R_PPC64_TOC: *(unsigned long *)location = my_r2(sechdrs, me); break; @@ -610,18 +903,24 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, = (*((uint16_t *) location) & ~0xffff) | (value & 0xffff); break; +#endif case R_PPC_REL24: +#ifdef CONFIG_PPC_KERNEL_PCREL + /* PCREL still generates REL24 for mcount */ + case R_PPC64_REL24_NOTOC: +#endif /* FIXME: Handle weak symbols here --RR */ - if (sym->st_shndx == SHN_UNDEF) { + if (sym->st_shndx == SHN_UNDEF || + sym->st_shndx == SHN_LIVEPATCH) { /* External: go via stub */ - value = stub_for_addr(sechdrs, value, me); + value = stub_for_addr(sechdrs, value, me, + strtab + sym->st_name); if (!value) return -ENOENT; - if (!restore_r2((u32 *)location + 1, me)) + if (restore_r2(strtab + sym->st_name, + (u32 *)location + 1, me)) return -ENOEXEC; - - squash_toc_save_inst(strtab + sym->st_name, value); } else value += local_entry_offset(sym); @@ -634,9 +933,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, } /* Only replace bits 2 through 26 */ - *(uint32_t *)location - = (*(uint32_t *)location & ~0x03fffffc) - | (value & 0x03fffffc); + value = (*(uint32_t *)location & ~PPC_LI_MASK) | PPC_LI(value); + + if (patch_instruction((u32 *)location, ppc_inst(value))) + return -EFAULT; + break; case R_PPC64_REL64: @@ -646,9 +947,57 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_PPC64_REL32: /* 32 bits relative (used by relative exception tables) */ - *(u32 *)location = value - (unsigned long)location; + /* Convert value to relative */ + value -= (unsigned long)location; + if (value + 0x80000000 > 0xffffffff) { + pr_err("%s: REL32 %li out of range!\n", + me->name, (long int)value); + return -ENOEXEC; + } + *(u32 *)location = value; break; +#ifdef CONFIG_PPC_KERNEL_PCREL + case R_PPC64_PCREL34: { + unsigned long absvalue = value; + + /* Convert value to relative */ + value -= (unsigned long)location; + + if (value + 0x200000000 > 0x3ffffffff) { + if (sym->st_shndx != me->arch.pcpu_section) { + pr_err("%s: REL34 %li out of range!\n", + me->name, (long)value); + return -ENOEXEC; + } + + /* + * per-cpu section is special cased because + * it is moved during loading, so has to be + * converted to use GOT. + */ + value = got_for_addr(sechdrs, absvalue, me, + strtab + sym->st_name); + if (!value) + return -ENOENT; + value -= (unsigned long)location; + + /* Turn pla into pld */ + if (patch_instruction((u32 *)location, + ppc_inst_prefix((*(u32 *)location & ~0x02000000), + (*((u32 *)location + 1) & ~0xf8000000) | 0xe4000000))) + return -EFAULT; + } + + if (patch_instruction((u32 *)location, + ppc_inst_prefix((*(u32 *)location & ~0x3ffff) | IMM_H18(value), + (*((u32 *)location + 1) & ~0xffff) | IMM_L(value)))) + return -EFAULT; + + break; + } + +#else case R_PPC64_TOCSAVE: /* * Marker reloc indicates we don't have to save r2. @@ -656,8 +1005,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, * it. 
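The range tests in the REL32 and PCREL34 cases use the standard bias trick: adding 2^(n-1) maps the valid signed window onto [0, 2^n - 1], so a single unsigned compare suffices. Generalized as a sketch:

#include <stdbool.h>
#include <stdint.h>

/* value + 0x80000000 > 0xffffffff    <=>  !fits_signed(value, 32)
 * value + 0x200000000 > 0x3ffffffff  <=>  !fits_signed(value, 34) */
static bool fits_signed(uint64_t value, unsigned int bits)
{
    uint64_t bias = 1ULL << (bits - 1);

    return value + bias <= 2 * bias - 1;
}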
*/ break; +#endif case R_PPC64_ENTRY: + if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) + break; + /* * Optimize ELFv2 large code model entry point if * the TOC is within 2GB range of current location. @@ -670,18 +1023,17 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, * ld r2, ...(r12) * add r2, r2, r12 */ - if ((((uint32_t *)location)[0] & ~0xfffc) - != 0xe84c0000) + if ((((uint32_t *)location)[0] & ~0xfffc) != PPC_RAW_LD(_R2, _R12, 0)) break; - if (((uint32_t *)location)[1] != 0x7c426214) + if (((uint32_t *)location)[1] != PPC_RAW_ADD(_R2, _R2, _R12)) break; /* * If found, replace it with: * addis r2, r12, (.TOC.-func)@ha - * addi r2, r12, (.TOC.-func)@l + * addi r2, r2, (.TOC.-func)@l */ - ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value); - ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value); + ((uint32_t *)location)[0] = PPC_RAW_ADDIS(_R2, _R12, PPC_HA(value)); + ((uint32_t *)location)[1] = PPC_RAW_ADDI(_R2, _R2, PPC_LO(value)); break; case R_PPC64_REL16_HA: @@ -701,6 +1053,20 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | (value & 0xffff); break; +#ifdef CONFIG_PPC_KERNEL_PCREL + case R_PPC64_GOT_PCREL34: + value = got_for_addr(sechdrs, value, me, + strtab + sym->st_name); + if (!value) + return -ENOENT; + value -= (unsigned long)location; + ((uint32_t *)location)[0] = (((uint32_t *)location)[0] & ~0x3ffff) | + ((value >> 16) & 0x3ffff); + ((uint32_t *)location)[1] = (((uint32_t *)location)[1] & ~0xffff) | + (value & 0xffff); + break; +#endif + default: pr_err("%s: Unknown ADD relocation: %lu\n", me->name, @@ -713,80 +1079,84 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, } #ifdef CONFIG_DYNAMIC_FTRACE - -#ifdef CC_USING_MPROFILE_KERNEL - -#define PACATOC offsetof(struct paca_struct, kernel_toc) - -/* - * For mprofile-kernel we use a special stub for ftrace_caller() because we - * can't rely on r2 containing this module's TOC when we enter the stub. - * - * That can happen if the function calling us didn't need to use the toc. In - * that case it won't have setup r2, and the r2 value will be either the - * kernel's toc, or possibly another modules toc. - * - * To deal with that this stub uses the kernel toc, which is always accessible - * via the paca (in r13). The target (ftrace_caller()) is responsible for - * saving and restoring the toc before returning. 
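The R_PPC64_ENTRY case pattern-matches the large-code-model prologue and, when .TOC. is within 2GB, rewrites it in place (the diff also corrects the comment: the second replacement instruction is addi r2,r2,..., not addi r2,r12,...). A sketch using the raw encodings from the removed lines:

#include <stdint.h>

/* Sketch of the R_PPC64_ENTRY rewrite:
 *    ld  r2,.TOC.@toc(r12)    (0xe84c0000 | offset)
 *    add r2,r2,r12            (0x7c426214)
 * becomes, when .TOC. is within 2GB of the entry point,
 *    addis r2,r12,ha          (0x3c4c0000 | ha)
 *    addi  r2,r2,lo           (0x38420000 | lo)
 */
static void rewrite_entry(uint32_t *insn, int32_t toc_off)
{
    uint32_t lo = toc_off & 0xffff;
    uint32_t ha = ((toc_off + 0x8000) >> 16) & 0xffff;

    if ((insn[0] & ~0xfffcu) != 0xe84c0000u || insn[1] != 0x7c426214u)
        return;     /* not the expected prologue: leave it */

    insn[0] = 0x3c4c0000u | ha;
    insn[1] = 0x38420000u | lo;
}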
- */ -static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me) +int module_trampoline_target(struct module *mod, unsigned long addr, + unsigned long *target) { - struct ppc64_stub_entry *entry; - unsigned int i, num_stubs; - static u32 stub_insns[] = { - 0xe98d0000 | PACATOC, /* ld r12,PACATOC(r13) */ - 0x3d8c0000, /* addis r12,r12,<high> */ - 0x398c0000, /* addi r12,r12,<low> */ - 0x7d8903a6, /* mtctr r12 */ - 0x4e800420, /* bctr */ - }; - long reladdr; + struct ppc64_stub_entry *stub; + func_desc_t funcdata; + u32 magic; - num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*entry); + if (!within_module_core(addr, mod)) { + pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name); + return -EFAULT; + } - /* Find the next available stub entry */ - entry = (void *)sechdrs[me->arch.stubs_section].sh_addr; - for (i = 0; i < num_stubs && stub_func_addr(entry->funcdata); i++, entry++); + stub = (struct ppc64_stub_entry *)addr; - if (i >= num_stubs) { - pr_err("%s: Unable to find a free slot for ftrace stub.\n", me->name); - return 0; + if (copy_from_kernel_nofault(&magic, &stub->magic, + sizeof(magic))) { + pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name); + return -EFAULT; } - memcpy(entry->jump, stub_insns, sizeof(stub_insns)); - - /* Stub uses address relative to kernel toc (from the paca) */ - reladdr = (unsigned long)ftrace_caller - kernel_toc_addr(); - if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { - pr_err("%s: Address of ftrace_caller out of range of kernel_toc.\n", me->name); - return 0; + if (magic != STUB_MAGIC) { + pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name); + return -EFAULT; } - entry->jump[1] |= PPC_HA(reladdr); - entry->jump[2] |= PPC_LO(reladdr); + if (copy_from_kernel_nofault(&funcdata, &stub->funcdata, + sizeof(funcdata))) { + pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name); + return -EFAULT; + } - /* Eventhough we don't use funcdata in the stub, it's needed elsewhere. 
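module_trampoline_target(), moved here and switched from probe_kernel_read() to copy_from_kernel_nofault(), validates the magic before trusting the descriptor behind it. The shape of the check, as a userspace mock with plain reads standing in for the nofault copies:

#include <stdint.h>

#define STUB_MAGIC 0x73747562   /* "stub" */

struct stub {
    uint32_t jump[7];
    uint32_t magic;
    uint64_t target;    /* ABIv2: the descriptor is just the address */
};

/* Refuse to report a target unless the magic proves this really is
 * one of our stubs. */
static int stub_target(const struct stub *s, uint64_t *target)
{
    if (s->magic != STUB_MAGIC)
        return -1;
    *target = s->target;
    return 0;
}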
*/ - entry->funcdata = func_desc((unsigned long)ftrace_caller); - entry->magic = STUB_MAGIC; + *target = stub_func_addr(funcdata); - return (unsigned long)entry; + return 0; } -#else -static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me) + +static int setup_ftrace_ool_stubs(const Elf64_Shdr *sechdrs, unsigned long addr, struct module *me) { - return stub_for_addr(sechdrs, (unsigned long)ftrace_caller, me); -} +#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE + unsigned int total_stubs, num_stubs; + struct ppc64_stub_entry *stub; + + total_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stub); + num_stubs = roundup(me->arch.ool_stub_count * sizeof(struct ftrace_ool_stub), + sizeof(struct ppc64_stub_entry)) / sizeof(struct ppc64_stub_entry); + + if (WARN_ON(me->arch.stub_count + num_stubs > total_stubs)) + return -1; + + stub = (void *)sechdrs[me->arch.stubs_section].sh_addr; + me->arch.ool_stubs = (struct ftrace_ool_stub *)(stub + me->arch.stub_count); + me->arch.stub_count += num_stubs; #endif + return 0; +} + int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs) { - mod->arch.toc = my_r2(sechdrs, mod); - mod->arch.tramp = create_ftrace_stub(sechdrs, mod); + mod->arch.tramp = stub_for_addr(sechdrs, + (unsigned long)ftrace_caller, + mod, + "ftrace_caller"); +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + mod->arch.tramp_regs = stub_for_addr(sechdrs, + (unsigned long)ftrace_regs_caller, + mod, + "ftrace_regs_caller"); + if (!mod->arch.tramp_regs) + return -ENOENT; +#endif if (!mod->arch.tramp) return -ENOENT; + if (setup_ftrace_ool_stubs(sechdrs, mod->arch.tramp, mod)) + return -ENOENT; + return 0; } #endif |
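The out-of-line ftrace stubs introduced by setup_ftrace_ool_stubs() are carved out of the same .stubs section, so their byte count is rounded up to whole ppc64_stub_entry slots. The arithmetic, with stand-in sizes for illustration:

#include <stdio.h>

#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
    unsigned long ool = 16;   /* stand-in: sizeof(struct ftrace_ool_stub) */
    unsigned long slot = 48;  /* stand-in: sizeof(struct ppc64_stub_entry) */
    unsigned long count = 7;

    /* 7 * 16 = 112 bytes, rounds up to 144, i.e. 3 full stub slots */
    printf("%lu slots\n", ROUNDUP(count * ool, slot) / slot);
    return 0;
}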
