Diffstat (limited to 'arch/arm64')
-rw-r--r--   arch/arm64/Kconfig                        1
-rw-r--r--   arch/arm64/Kconfig.platforms              1
-rw-r--r--   arch/arm64/include/asm/cfi.h              7
-rw-r--r--   arch/arm64/include/asm/mman.h            10
-rw-r--r--   arch/arm64/include/asm/pgtable-prot.h     1
-rw-r--r--   arch/arm64/include/asm/pgtable.h         42
-rw-r--r--   arch/arm64/include/asm/tlbflush.h        11
-rw-r--r--   arch/arm64/mm/fault.c                     2
-rw-r--r--   arch/arm64/mm/mmap.c                      2
-rw-r--r--   arch/arm64/mm/mmu.c                      30
-rw-r--r--   arch/arm64/mm/ptdump_debugfs.c            3
-rw-r--r--   arch/arm64/net/bpf_jit_comp.c            30
12 files changed, 83 insertions(+), 57 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3c117b1fa198..e9bbfacc35a6 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -42,7 +42,6 @@ config ARM64
 	select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT
 	select ARCH_HAS_PREEMPT_LAZY
 	select ARCH_HAS_PTDUMP
-	select ARCH_HAS_PTE_DEVMAP
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_HW_PTE_YOUNG
 	select ARCH_HAS_SETUP_DMA_OPS
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 90d6b028fbbb..a88f5ad9328c 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -333,7 +333,6 @@ config ARCH_STM32
 	bool "STMicroelectronics STM32 SoC Family"
 	select GPIOLIB
 	select PINCTRL
-	select PINCTRL_STM32MP257
 	select ARM_SMC_MBOX
 	select ARM_SCMI_PROTOCOL
 	select REGULATOR
diff --git a/arch/arm64/include/asm/cfi.h b/arch/arm64/include/asm/cfi.h
new file mode 100644
index 000000000000..ab90f0351b7a
--- /dev/null
+++ b/arch/arm64/include/asm/cfi.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_CFI_H
+#define _ASM_ARM64_CFI_H
+
+#define __bpfcall
+
+#endif /* _ASM_ARM64_CFI_H */
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 21df8bbd2668..8770c7ee759f 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -11,10 +11,10 @@
 #include <linux/shmem_fs.h>
 #include <linux/types.h>
 
-static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+static inline vm_flags_t arch_calc_vm_prot_bits(unsigned long prot,
 	unsigned long pkey)
 {
-	unsigned long ret = 0;
+	vm_flags_t ret = 0;
 
 	if (system_supports_bti() && (prot & PROT_BTI))
 		ret |= VM_ARM64_BTI;
@@ -34,8 +34,8 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
 
-static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
-						   unsigned long flags)
+static inline vm_flags_t arch_calc_vm_flag_bits(struct file *file,
+						unsigned long flags)
 {
 	/*
 	 * Only allow MTE on anonymous mappings as these are guaranteed to be
@@ -68,7 +68,7 @@ static inline bool arch_validate_prot(unsigned long prot,
 }
 #define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
 
-static inline bool arch_validate_flags(unsigned long vm_flags)
+static inline bool arch_validate_flags(vm_flags_t vm_flags)
 {
 	if (system_supports_mte()) {
 		/*
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 7830d031742e..85dceb1c66f4 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -17,7 +17,6 @@
 #define PTE_SWP_EXCLUSIVE	(_AT(pteval_t, 1) << 2)	 /* only for swp ptes */
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
-#define PTE_DEVMAP		(_AT(pteval_t, 1) << 57)
 
 /*
  * PTE_PRESENT_INVALID=1 & PTE_VALID=0 indicates that the pte's fields should be
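The mman.h hunks above are mechanical, but the typedef is the point: giving the VMA-flag word its own vm_flags_t type lets the mm layer widen it beyond unsigned long later without re-auditing every arch hook. A minimal standalone sketch of how such a prot-to-flags hook composes; the flag values here are hypothetical, chosen only for illustration, not the kernel's:

    #include <stdio.h>

    typedef unsigned long vm_flags_t;   /* currently unsigned long; the named type is the point */

    #define PROT_BTI      0x10          /* hypothetical values, for illustration only */
    #define VM_ARM64_BTI  (1UL << 40)

    /* Shape of the hook after the conversion: vm_flags_t out instead of unsigned long. */
    static vm_flags_t arch_calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
    {
        vm_flags_t ret = 0;

        (void)pkey;
        if (prot & PROT_BTI)
            ret |= VM_ARM64_BTI;
        return ret;
    }

    int main(void)
    {
        printf("vm_flags = %#lx\n", arch_calc_vm_prot_bits(PROT_BTI, 0));
        return 0;
    }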
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 192d86e1cc76..abd2dee416b3 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -190,7 +190,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 #define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
 #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
-#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
 #define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
 				 PTE_ATTRINDX(MT_NORMAL_TAGGED))
 
@@ -372,11 +371,6 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
 	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
 }
 
-static inline pte_t pte_mkdevmap(pte_t pte)
-{
-	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
-}
-
 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
 static inline int pte_uffd_wp(pte_t pte)
 {
@@ -653,14 +647,6 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 	return __pmd((pmd_val(pmd) & ~mask) | val);
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
-#endif
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
-{
-	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
-}
-
 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
 #define pmd_special(pte)	(!!((pmd_val(pte) & PTE_SPECIAL)))
 static inline pmd_t pmd_mkspecial(pmd_t pmd)
@@ -1302,16 +1288,6 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
 	return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
 				pmd_pte(entry), dirty);
 }
-
-static inline int pud_devmap(pud_t pud)
-{
-	return 0;
-}
-
-static inline int pgd_devmap(pgd_t pgd)
-{
-	return 0;
-}
 #endif
 
 #ifdef CONFIG_PAGE_TABLE_CHECK
@@ -1643,6 +1619,14 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
  */
 #define arch_wants_old_prefaulted_pte	cpu_has_hw_af
 
+/*
+ * Request exec memory is read into pagecache in at least 64K folios. This size
+ * can be contpte-mapped when 4K base pages are in use (16 pages into 1 iTLB
+ * entry), and HPA can coalesce it (4 pages into 1 TLB entry) when 16K base
+ * pages are in use.
+ */
+#define exec_folio_order() ilog2(SZ_64K >> PAGE_SHIFT)
+
 static inline bool pud_sect_supported(void)
 {
 	return PAGE_SIZE == SZ_4K;
@@ -1659,6 +1643,16 @@ extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
 				    unsigned long addr, pte_t *ptep,
 				    pte_t old_pte, pte_t new_pte);
 
+#define modify_prot_start_ptes modify_prot_start_ptes
+extern pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep,
+				    unsigned int nr);
+
+#define modify_prot_commit_ptes modify_prot_commit_ptes
+extern void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
+				    pte_t *ptep, pte_t old_pte, pte_t pte,
+				    unsigned int nr);
+
 #ifdef CONFIG_ARM64_CONTPTE
 
 /*
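The new exec_folio_order() above reduces to simple arithmetic: ilog2(SZ_64K >> PAGE_SHIFT) yields order 4 (16 pages, one contpte run) with 4K base pages, order 2 (4 pages, one HPA-coalesced entry) with 16K pages, and order 0 with 64K pages. A standalone sketch that reproduces the computation; ilog2u is a stand-in for the kernel's ilog2():

    #include <stdio.h>

    /* Stand-in for the kernel's ilog2() on a power-of-two value. */
    static unsigned int ilog2u(unsigned long v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        const unsigned long sz_64k = 64UL * 1024;
        const unsigned int page_shifts[] = { 12, 14, 16 };  /* 4K, 16K, 64K base pages */

        for (unsigned int i = 0; i < 3; i++) {
            unsigned int order = ilog2u(sz_64k >> page_shifts[i]);

            /* order 4 = 16 pages (contpte run), order 2 = 4 pages (HPA), order 0 = 1 page */
            printf("PAGE_SHIFT=%u -> exec_folio_order()=%u (%u pages per folio)\n",
                   page_shifts[i], order, 1u << order);
        }
        return 0;
    }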
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index aa9efee17277..18a5dc0c9a54 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -323,17 +323,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 }
 
 /*
- * If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure
- * all the previously issued TLBIs targeting mm have completed. But since we
- * can be executing on a remote CPU, a DSB cannot guarantee this like it can
- * for arch_tlbbatch_flush(). Our only option is to flush the entire mm.
- */
-static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
-	flush_tlb_mm(mm);
-}
-
-/*
  * To support TLB batched flush for multiple pages unmapping, we only send
  * the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
  * completion at the end in arch_tlbbatch_flush(). Since we've already issued
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index fcc783e8e9bb..d816ff44faff 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -555,7 +555,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	const struct fault_info *inf;
 	struct mm_struct *mm = current->mm;
 	vm_fault_t fault;
-	unsigned long vm_flags;
+	vm_flags_t vm_flags;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
 	struct vm_area_struct *vma;
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index c86c348857c4..08ee177432c2 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -81,7 +81,7 @@ static int __init adjust_protection_map(void)
 }
 arch_initcall(adjust_protection_map);
 
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
 {
 	ptdesc_t prot;
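With arch_flush_tlb_batched_pending() gone from tlbflush.h, what remains is the batching scheme the neighbouring comment describes: a per-page TLBI is posted eagerly in arch_tlbbatch_add_pending(), and a single barrier in arch_tlbbatch_flush() waits for all of them at once. A toy model of that split; issue_tlbi_va and dsb_ish are hypothetical stand-ins for the TLBI instruction and the DSB barrier, not kernel APIs:

    #include <stdio.h>

    /* Hypothetical stand-ins for the TLBI instruction and the DSB barrier. */
    static void issue_tlbi_va(unsigned long addr)
    {
        printf("tlbi %#lx (posted, not waited on)\n", addr);
    }

    static void dsb_ish(void)
    {
        printf("dsb ish (wait for all posted TLBIs to complete)\n");
    }

    struct tlb_batch { unsigned long pending; };

    /* Unmap side: post one invalidate per page, never stall. */
    static void tlbbatch_add_pending(struct tlb_batch *b, unsigned long addr)
    {
        issue_tlbi_va(addr);
        b->pending++;
    }

    /* Completion side: one barrier covers every invalidate posted above. */
    static void tlbbatch_flush(struct tlb_batch *b)
    {
        dsb_ish();
        b->pending = 0;
    }

    int main(void)
    {
        struct tlb_batch b = { 0 };

        for (unsigned long addr = 0x1000; addr < 0x4000; addr += 0x1000)
            tlbbatch_add_pending(&b, addr);
        tlbbatch_flush(&b);
        return 0;
    }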
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 00ab1d648db6..abd9725796e9 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -26,6 +26,7 @@
 #include <linux/set_memory.h>
 #include <linux/kfence.h>
 #include <linux/pkeys.h>
+#include <linux/mm_inline.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -720,7 +721,7 @@ void mark_rodata_ro(void)
 
 static void __init declare_vma(struct vm_struct *vma,
 			       void *va_start, void *va_end,
-			       unsigned long vm_flags)
+			       vm_flags_t vm_flags)
 {
 	phys_addr_t pa_start = __pa_symbol(va_start);
 	unsigned long size = va_end - va_start;
@@ -1524,24 +1525,41 @@ static int __init prevent_bootmem_remove_init(void)
 early_initcall(prevent_bootmem_remove_init);
 #endif
 
-pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr,
+			     pte_t *ptep, unsigned int nr)
 {
+	pte_t pte = get_and_clear_full_ptes(vma->vm_mm, addr, ptep, nr, /* full = */ 0);
+
 	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
 		/*
 		 * Break-before-make (BBM) is required for all user space mappings
 		 * when the permission changes from executable to non-executable
 		 * in cases where cpu is affected with errata #2645198.
 		 */
-		if (pte_user_exec(ptep_get(ptep)))
-			return ptep_clear_flush(vma, addr, ptep);
+		if (pte_accessible(vma->vm_mm, pte) && pte_user_exec(pte))
+			__flush_tlb_range(vma, addr, nr * PAGE_SIZE,
+					  PAGE_SIZE, true, 3);
 	}
-	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+
+	return pte;
+}
+
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+	return modify_prot_start_ptes(vma, addr, ptep, 1);
+}
+
+void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
+			     pte_t *ptep, pte_t old_pte, pte_t pte,
+			     unsigned int nr)
+{
+	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
 }
 
 void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t *ptep, pte_t old_pte, pte_t pte)
 {
-	set_pte_at(vma->vm_mm, addr, ptep, pte);
+	modify_prot_commit_ptes(vma, addr, ptep, old_pte, pte, 1);
 }
 
 /*
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
index 68bf1a125502..1e308328c079 100644
--- a/arch/arm64/mm/ptdump_debugfs.c
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/debugfs.h>
-#include <linux/memory_hotplug.h>
 #include <linux/seq_file.h>
 
 #include <asm/ptdump.h>
@@ -9,9 +8,7 @@ static int ptdump_show(struct seq_file *m, void *v)
 {
 	struct ptdump_info *info = m->private;
 
-	get_online_mems();
 	ptdump_walk(m, info);
-	put_online_mems();
 	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(ptdump);
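The mmu.c change above generalises the prot-modification protocol from one PTE to a run of nr PTEs: the start side snapshots and clears the whole run (flushing once if the BBM erratum applies), the commit side writes the new values back in one pass, and the old single-PTE entry points become nr == 1 wrappers. A toy model of that start/commit pairing, using plain longs in place of real PTEs; the semantics here are illustrative, not the kernel's:

    #include <stdio.h>

    #define NR_PTES 4   /* model a small contiguous run with an array */

    /* start side: snapshot the first entry and clear the whole run, in the
     * spirit of get_and_clear_full_ptes(); a real kernel may TLB-flush here. */
    static long modify_prot_start(long *ptep, unsigned int nr)
    {
        long first = ptep[0];

        for (unsigned int i = 0; i < nr; i++)
            ptep[i] = 0;
        return first;
    }

    /* commit side: write the new values back in one pass, in the spirit of
     * set_ptes(), with consecutive entries advancing by one "pfn". */
    static void modify_prot_commit(long *ptep, long newval, unsigned int nr)
    {
        for (unsigned int i = 0; i < nr; i++)
            ptep[i] = newval + (long)i;
    }

    int main(void)
    {
        long ptes[NR_PTES];

        for (int i = 0; i < NR_PTES; i++)
            ptes[i] = 0x1000 + i;       /* fake "pfn | prot" words */

        long old = modify_prot_start(ptes, NR_PTES);
        modify_prot_commit(ptes, old | 0x1, NR_PTES);   /* e.g. set a prot bit */

        for (int i = 0; i < NR_PTES; i++)
            printf("pte[%d] = %#lx\n", i, ptes[i]);
        return 0;
    }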
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 97dfd5432809..52ffe115a8c4 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -10,6 +10,7 @@
 #include <linux/arm-smccc.h>
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
+#include <linux/cfi.h>
 #include <linux/filter.h>
 #include <linux/memory.h>
 #include <linux/printk.h>
@@ -114,6 +115,14 @@ static inline void emit(const u32 insn, struct jit_ctx *ctx)
 	ctx->idx++;
 }
 
+static inline void emit_u32_data(const u32 data, struct jit_ctx *ctx)
+{
+	if (ctx->image != NULL && ctx->write)
+		ctx->image[ctx->idx] = data;
+
+	ctx->idx++;
+}
+
 static inline void emit_a64_mov_i(const int is64, const int reg,
 				  const s32 val, struct jit_ctx *ctx)
 {
@@ -174,6 +183,12 @@ static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
 		emit(insn, ctx);
 }
 
+static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx)
+{
+	if (IS_ENABLED(CONFIG_CFI_CLANG))
+		emit_u32_data(hash, ctx);
+}
+
 /*
  * Kernel addresses in the vmalloc space use at most 48 bits, and the
  * remaining bits are guaranteed to be 0x1. So we can compose the address
@@ -503,7 +518,6 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
 	const u8 priv_sp = bpf2a64[PRIVATE_SP];
 	void __percpu *priv_stack_ptr;
-	const int idx0 = ctx->idx;
 	int cur_offset;
 
 	/*
@@ -529,6 +543,9 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	 *
 	 */
 
+	emit_kcfi(is_main_prog ? cfi_bpf_hash : cfi_bpf_subprog_hash, ctx);
+	const int idx0 = ctx->idx;
+
 	/* bpf function may be invoked by 3 instruction types:
 	 * 1. bl, attached via freplace to bpf prog via short jump
 	 * 2. br, attached via freplace to bpf prog via long jump
@@ -2146,9 +2163,9 @@ skip_init_ctx:
 		jit_data->ro_header = ro_header;
 	}
 
-	prog->bpf_func = (void *)ctx.ro_image;
+	prog->bpf_func = (void *)ctx.ro_image + cfi_get_offset();
 	prog->jited = 1;
-	prog->jited_len = prog_size;
+	prog->jited_len = prog_size - cfi_get_offset();
 
 	if (!prog->is_func || extra_pass) {
 		int i;
@@ -2527,6 +2544,12 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 	/* return address locates above FP */
 	retaddr_off = stack_size + 8;
 
+	if (flags & BPF_TRAMP_F_INDIRECT) {
+		/*
+		 * Indirect call for bpf_struct_ops
+		 */
+		emit_kcfi(cfi_get_func_hash(func_addr), ctx);
+	}
 	/* bpf trampoline may be invoked by 3 instruction types:
 	 * 1. bl, attached to bpf prog or kernel function via short jump
 	 * 2. br, attached to bpf prog or kernel function via long jump
@@ -3045,6 +3068,7 @@ void bpf_jit_free(struct bpf_prog *prog)
 			   sizeof(jit_data->header->size));
 		kfree(jit_data);
 	}
+	prog->bpf_func -= cfi_get_offset();
 	hdr = bpf_jit_binary_pack_hdr(prog);
 	bpf_jit_binary_pack_free(hdr, NULL);
 	priv_stack_ptr = prog->aux->priv_stack_ptr;
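The JIT changes above all follow from one layout decision: emit_kcfi() plants a 32-bit kCFI type hash immediately before the function entry, so bpf_func must be advanced past it, jited_len shrunk by the same amount, and the offset subtracted again before freeing. A toy model of that layout; CFI_OFFSET stands in for cfi_get_offset(), and the hash constant is arbitrary:

    #include <stdio.h>
    #include <stdint.h>

    #define CFI_OFFSET sizeof(uint32_t)   /* stand-in for cfi_get_offset() */

    static uint32_t image[16];            /* stand-in for the JIT output image */

    int main(void)
    {
        const uint32_t hash = 0xdeadbeef; /* arbitrary; stands in for cfi_bpf_hash */

        image[0] = hash;                  /* emit_kcfi() plants the hash first... */
        image[1] = 0xd503201f;            /* ...instructions follow (AArch64 NOP, for flavour) */

        /* bpf_func skips the hash, like ctx.ro_image + cfi_get_offset() above */
        uint32_t *bpf_func = (uint32_t *)((char *)image + CFI_OFFSET);

        /* an indirect caller validates the hash stored just below the entry point */
        uint32_t seen = *(bpf_func - 1);
        printf("indirect call %s\n", seen == hash ? "permitted" : "would trap");

        return 0;
    }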