Diffstat (limited to 'arch/arm64/include/asm/module.h')
-rw-r--r--	arch/arm64/include/asm/module.h	69
1 file changed, 45 insertions(+), 24 deletions(-)
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 19bd97671bb8..fb9b88eebeb1 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -1,28 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_MODULE_H
 #define __ASM_MODULE_H
 
 #include <asm-generic/module.h>
 
-#define MODULE_ARCH_VERMAGIC	"aarch64"
-
-#ifdef CONFIG_ARM64_MODULE_PLTS
 struct mod_plt_sec {
-	struct elf64_shdr	*plt;
+	int			plt_shndx;
 	int			plt_num_entries;
 	int			plt_max_entries;
 };
@@ -32,17 +18,52 @@ struct mod_arch_specific {
 	struct mod_plt_sec	init;
 
 	/* for CONFIG_DYNAMIC_FTRACE */
-	void			*ftrace_trampoline;
+	struct plt_entry	*ftrace_trampolines;
+	struct plt_entry	*init_ftrace_trampolines;
 };
-#endif
 
-u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
+u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
+			  void *loc, const Elf64_Rela *rela,
 			  Elf64_Sym *sym);
 
-#ifdef CONFIG_RANDOMIZE_BASE
-extern u64 module_alloc_base;
-#else
-#define module_alloc_base	((u64)_etext - MODULES_VSIZE)
-#endif
+u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
+				void *loc, u64 val);
+
+struct plt_entry {
+	/*
+	 * A program that conforms to the AArch64 Procedure Call Standard
+	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
+	 * IP1 (x17) may be inserted at any branch instruction that is
+	 * exposed to a relocation that supports long branches. Since that
+	 * is exactly what we are dealing with here, we are free to use x16
+	 * as a scratch register in the PLT veneers.
+	 */
+	__le32	adrp;	/* adrp x16, ....		*/
+	__le32	add;	/* add  x16, x16, #0x....	*/
+	__le32	br;	/* br   x16			*/
+};
+
+static inline bool is_forbidden_offset_for_adrp(void *place)
+{
+	return cpus_have_final_cap(ARM64_WORKAROUND_843419) &&
+	       ((u64)place & 0xfff) >= 0xff8;
+}
+
+struct plt_entry get_plt_entry(u64 dst, void *pc);
+
+static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+					   const Elf_Shdr *sechdrs,
+					   const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+	}
+
+	return NULL;
+}
 
 #endif /* __ASM_MODULE_H */
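Note: the new struct plt_entry reserves room for a three-instruction veneer: adrp loads the target's 4 KiB page address into x16, add fills in the low 12 bits, and br jumps through x16. get_plt_entry() is only declared in this header; as a rough illustration of what it has to produce, the standalone userspace sketch below hand-assembles the three instructions. The helper name adrp_x16 and the sample addresses are assumptions for illustration, not the kernel's implementation (which builds the encodings with its own insn helpers).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Encode "adrp x16, <dst page>" relative to the veneer's own address. */
static uint32_t adrp_x16(uint64_t pc, uint64_t dst)
{
	/* 21-bit signed distance between the target page and the PC page */
	int64_t pgoff = (int64_t)(dst >> 12) - (int64_t)(pc >> 12);
	uint32_t immlo = (uint32_t)pgoff & 0x3;
	uint32_t immhi = ((uint32_t)pgoff >> 2) & 0x7ffff;

	/* 0x90000010 = adrp x16, #0; immlo sits at bits 29-30, immhi at 5-23 */
	return 0x90000010u | (immlo << 29) | (immhi << 5);
}

int main(void)
{
	uint64_t pc  = 0xffff000010000000ull;	/* hypothetical veneer address */
	uint64_t dst = 0xffff000012345678ull;	/* hypothetical branch target  */

	uint32_t adrp = adrp_x16(pc, dst);
	/* 0x91000210 = add x16, x16, #0; imm12 sits at bits 10-21 */
	uint32_t add  = 0x91000210u | (uint32_t)(dst & 0xfff) << 10;
	uint32_t br   = 0xd61f0200u;		/* br x16 */

	printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n", adrp, add, br);
	return 0;
}

Because adrp is page-relative, this fixed three-instruction layout can reach any destination within roughly +/-4 GiB of the PLT, which is what makes it usable for long branches while clobbering only the x16 scratch register that AAPCS64 permits.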

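The is_forbidden_offset_for_adrp() helper exists because of Cortex-A53 erratum 843419, which can affect an adrp instruction placed in one of the last two instruction slots of a 4 KiB page (byte offsets 0xff8 and 0xffc); module_emit_veneer_for_adrp() provides the fallback for relocations that would land there. A minimal userspace sketch of just the address test follows, with the kernel-only cpus_have_final_cap() half of the condition deliberately omitted:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* True iff 'place' is one of the two affected slots in its 4 KiB page. */
static bool adrp_offset_forbidden(uint64_t place)
{
	return (place & 0xfff) >= 0xff8;
}

int main(void)
{
	assert(!adrp_offset_forbidden(0x1000));	/* start of a page: fine   */
	assert(!adrp_offset_forbidden(0x1ff4));	/* third-to-last slot: fine */
	assert(adrp_offset_forbidden(0x1ff8));	/* second-to-last: forbidden */
	assert(adrp_offset_forbidden(0x1ffc));	/* last slot: forbidden     */
	return 0;
}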