Diffstat (limited to 'arch/arm64/mm')
29 files changed, 5025 insertions, 2829 deletions
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 849c1df3d214..c26489cf96cd 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -1,13 +1,17 @@ # SPDX-License-Identifier: GPL-2.0 obj-y := dma-mapping.o extable.o fault.o init.o \ cache.o copypage.o flush.o \ - ioremap.o mmap.o pgd.o mmu.o \ - context.o proc.o pageattr.o + ioremap.o mmap.o pgd.o mem_encrypt.o mmu.o \ + context.o proc.o pageattr.o fixmap.o +obj-$(CONFIG_ARM64_CONTPTE) += contpte.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_ARM64_PTDUMP_CORE) += dump.o -obj-$(CONFIG_ARM64_PTDUMP_DEBUGFS) += ptdump_debugfs.o -obj-$(CONFIG_NUMA) += numa.o +obj-$(CONFIG_PTDUMP) += ptdump.o +obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o +obj-$(CONFIG_TRANS_TABLE) += trans_pgd.o +obj-$(CONFIG_TRANS_TABLE) += trans_pgd-asm.o obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o +obj-$(CONFIG_ARM64_MTE) += mteswap.o +obj-$(CONFIG_ARM64_GCS) += gcs.o KASAN_SANITIZE_physaddr.o += n obj-$(CONFIG_KASAN) += kasan_init.o diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index db767b072601..503567c864fd 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -15,7 +15,7 @@ #include <asm/asm-uaccess.h> /* - * flush_icache_range(start,end) + * caches_clean_inval_pou_macro(start,end) [fixup] * * Ensure that the I and D caches are coherent within specified region. * This is typically used when code has been written to a memory region, @@ -23,12 +23,27 @@ * * - start - virtual start address of region * - end - virtual end address of region + * - fixup - optional label to branch to on user fault */ -ENTRY(__flush_icache_range) - /* FALLTHROUGH */ +.macro caches_clean_inval_pou_macro, fixup +alternative_if ARM64_HAS_CACHE_IDC + dsb ishst + b .Ldc_skip_\@ +alternative_else_nop_endif + mov x2, x0 + mov x3, x1 + dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup +.Ldc_skip_\@: +alternative_if ARM64_HAS_CACHE_DIC + isb + b .Lic_skip_\@ +alternative_else_nop_endif + invalidate_icache_by_line x0, x1, x2, x3, \fixup +.Lic_skip_\@: +.endm /* - * __flush_cache_user_range(start,end) + * caches_clean_inval_pou(start,end) * * Ensure that the I and D caches are coherent within specified region. * This is typically used when code has been written to a memory region, @@ -37,117 +52,97 @@ ENTRY(__flush_icache_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(__flush_cache_user_range) +SYM_FUNC_START(caches_clean_inval_pou) + caches_clean_inval_pou_macro + ret +SYM_FUNC_END(caches_clean_inval_pou) +SYM_FUNC_ALIAS(__pi_caches_clean_inval_pou, caches_clean_inval_pou) + +/* + * caches_clean_inval_user_pou(start,end) + * + * Ensure that the I and D caches are coherent within specified region. + * This is typically used when code has been written to a memory region, + * and will be executed. 
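The cache.S hunk above folds __flush_icache_range and __flush_cache_user_range into a single caches_clean_inval_pou_macro, with IDC/DIC alternatives that skip redundant D-side or I-side maintenance. A minimal caller sketch, assuming kernel context; publish_insns() and the buffer are illustrative, only caches_clean_inval_pou() comes from the hunk:

	/* Hypothetical caller: after writing instructions into buf, make
	 * them fetchable. Everything except caches_clean_inval_pou() is
	 * illustrative. */
	static void publish_insns(void *buf, size_t len)
	{
		unsigned long start = (unsigned long)buf;

		caches_clean_inval_pou(start, start + len); /* D-clean + I-inval to PoU */
	}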
+ * + * - start - virtual start address of region + * - end - virtual end address of region + */ +SYM_FUNC_START(caches_clean_inval_user_pou) uaccess_ttbr0_enable x2, x3, x4 -alternative_if ARM64_HAS_CACHE_IDC - dsb ishst - b 7f -alternative_else_nop_endif - dcache_line_size x2, x3 - sub x3, x2, #1 - bic x4, x0, x3 -1: -user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE - add x4, x4, x2 - cmp x4, x1 - b.lo 1b - dsb ish -7: -alternative_if ARM64_HAS_CACHE_DIC - isb - b 8f -alternative_else_nop_endif - invalidate_icache_by_line x0, x1, x2, x3, 9f -8: mov x0, #0 + caches_clean_inval_pou_macro 2f + mov x0, xzr 1: uaccess_ttbr0_disable x1, x2 ret -9: +2: mov x0, #-EFAULT b 1b -ENDPROC(__flush_icache_range) -ENDPROC(__flush_cache_user_range) +SYM_FUNC_END(caches_clean_inval_user_pou) /* - * invalidate_icache_range(start,end) + * icache_inval_pou(start,end) * * Ensure that the I cache is invalid within specified region. * * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(invalidate_icache_range) +SYM_FUNC_START(icache_inval_pou) alternative_if ARM64_HAS_CACHE_DIC - mov x0, xzr isb ret alternative_else_nop_endif - uaccess_ttbr0_enable x2, x3, x4 - - invalidate_icache_by_line x0, x1, x2, x3, 2f - mov x0, xzr -1: - uaccess_ttbr0_disable x1, x2 + invalidate_icache_by_line x0, x1, x2, x3 ret -2: - mov x0, #-EFAULT - b 1b -ENDPROC(invalidate_icache_range) +SYM_FUNC_END(icache_inval_pou) /* - * __flush_dcache_area(kaddr, size) + * dcache_clean_inval_poc(start, end) * - * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * Ensure that any D-cache lines for the interval [start, end) * are cleaned and invalidated to the PoC. * - * - kaddr - kernel address - * - size - size in question + * - start - virtual start address of region + * - end - virtual end address of region */ -ENTRY(__flush_dcache_area) +SYM_FUNC_START(__pi_dcache_clean_inval_poc) dcache_by_line_op civac, sy, x0, x1, x2, x3 ret -ENDPIPROC(__flush_dcache_area) +SYM_FUNC_END(__pi_dcache_clean_inval_poc) +SYM_FUNC_ALIAS(dcache_clean_inval_poc, __pi_dcache_clean_inval_poc) /* - * __clean_dcache_area_pou(kaddr, size) + * dcache_clean_pou(start, end) * - * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoU. * - * - kaddr - kernel address - * - size - size in question + * - start - virtual start address of region + * - end - virtual end address of region */ -ENTRY(__clean_dcache_area_pou) +SYM_FUNC_START(dcache_clean_pou) alternative_if ARM64_HAS_CACHE_IDC dsb ishst ret alternative_else_nop_endif dcache_by_line_op cvau, ish, x0, x1, x2, x3 ret -ENDPROC(__clean_dcache_area_pou) +SYM_FUNC_END(dcache_clean_pou) /* - * __inval_dcache_area(kaddr, size) + * dcache_inval_poc(start, end) * - * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * Ensure that any D-cache lines for the interval [start, end) * are invalidated. Any partial lines at the ends of the interval are * also cleaned to PoC to prevent data loss. 
* - * - kaddr - kernel address - * - size - size in question - */ -ENTRY(__inval_dcache_area) - /* FALLTHROUGH */ - -/* - * __dma_inv_area(start, size) - * - start - virtual start address of region - * - size - size in question + * - start - kernel start address of region + * - end - kernel end address of region */ -__dma_inv_area: - add x1, x1, x0 +SYM_FUNC_START(__pi_dcache_inval_poc) dcache_line_size x2, x3 sub x3, x2, #1 tst x1, x3 // end cache line aligned? @@ -165,82 +160,38 @@ __dma_inv_area: b.lo 2b dsb sy ret -ENDPIPROC(__inval_dcache_area) -ENDPROC(__dma_inv_area) +SYM_FUNC_END(__pi_dcache_inval_poc) +SYM_FUNC_ALIAS(dcache_inval_poc, __pi_dcache_inval_poc) /* - * __clean_dcache_area_poc(kaddr, size) + * dcache_clean_poc(start, end) * - * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoC. * - * - kaddr - kernel address - * - size - size in question - */ -ENTRY(__clean_dcache_area_poc) - /* FALLTHROUGH */ - -/* - * __dma_clean_area(start, size) * - start - virtual start address of region - * - size - size in question + * - end - virtual end address of region */ -__dma_clean_area: +SYM_FUNC_START(__pi_dcache_clean_poc) dcache_by_line_op cvac, sy, x0, x1, x2, x3 ret -ENDPIPROC(__clean_dcache_area_poc) -ENDPROC(__dma_clean_area) +SYM_FUNC_END(__pi_dcache_clean_poc) +SYM_FUNC_ALIAS(dcache_clean_poc, __pi_dcache_clean_poc) /* - * __clean_dcache_area_pop(kaddr, size) + * dcache_clean_pop(start, end) * - * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoP. * - * - kaddr - kernel address - * - size - size in question + * - start - virtual start address of region + * - end - virtual end address of region */ -ENTRY(__clean_dcache_area_pop) +SYM_FUNC_START(__pi_dcache_clean_pop) alternative_if_not ARM64_HAS_DCPOP - b __clean_dcache_area_poc + b dcache_clean_poc alternative_else_nop_endif dcache_by_line_op cvap, sy, x0, x1, x2, x3 ret -ENDPIPROC(__clean_dcache_area_pop) - -/* - * __dma_flush_area(start, size) - * - * clean & invalidate D / U line - * - * - start - virtual start address of region - * - size - size in question - */ -ENTRY(__dma_flush_area) - dcache_by_line_op civac, sy, x0, x1, x2, x3 - ret -ENDPIPROC(__dma_flush_area) - -/* - * __dma_map_area(start, size, dir) - * - start - kernel virtual start address - * - size - size of region - * - dir - DMA direction - */ -ENTRY(__dma_map_area) - cmp w2, #DMA_FROM_DEVICE - b.eq __dma_inv_area - b __dma_clean_area -ENDPIPROC(__dma_map_area) - -/* - * __dma_unmap_area(start, size, dir) - * - start - kernel virtual start address - * - size - size of region - * - dir - DMA direction - */ -ENTRY(__dma_unmap_area) - cmp w2, #DMA_TO_DEVICE - b.ne __dma_inv_area - ret -ENDPIPROC(__dma_unmap_area) +SYM_FUNC_END(__pi_dcache_clean_pop) +SYM_FUNC_ALIAS(dcache_clean_pop, __pi_dcache_clean_pop) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index b5e329fde2dd..b2ac06246327 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -6,6 +6,7 @@ * Copyright (C) 2012 ARM Ltd. 
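Note the calling-convention change running through this file: the old helpers took (kaddr, size), the new ones take a half-open [start, end) virtual range. A hedged call-site sketch; flush_buffer() is an illustrative wrapper, not part of the diff:

	static void flush_buffer(void *kaddr, size_t size)
	{
		unsigned long start = (unsigned long)kaddr;

		/* was: __flush_dcache_area(kaddr, size); */
		dcache_clean_inval_poc(start, start + size);
	}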
*/ +#include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/sched.h> #include <linux/slab.h> @@ -26,35 +27,33 @@ static DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; +static unsigned long max_pinned_asids; +static unsigned long nr_pinned_asids; +static unsigned long *pinned_asid_map; + #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) -#define ASID_FIRST_VERSION (1UL << asid_bits) - -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1) -#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1) -#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK) -#else -#define NUM_USER_ASIDS (ASID_FIRST_VERSION) -#define asid2idx(asid) ((asid) & ~ASID_MASK) -#define idx2asid(idx) asid2idx(idx) -#endif +#define ASID_FIRST_VERSION (1UL << 16) + +#define NUM_USER_ASIDS (1UL << asid_bits) +#define ctxid2asid(asid) ((asid) & ~ASID_MASK) +#define asid2ctxid(asid, genid) ((asid) | (genid)) /* Get the ASIDBits supported by the current CPU */ static u32 get_cpu_asid_bits(void) { u32 asid; int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1), - ID_AA64MMFR0_ASID_SHIFT); + ID_AA64MMFR0_EL1_ASIDBITS_SHIFT); switch (fld) { default: pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", smp_processor_id(), fld); - /* Fallthrough */ - case 0: + fallthrough; + case ID_AA64MMFR0_EL1_ASIDBITS_8: asid = 8; break; - case 2: + case ID_AA64MMFR0_EL1_ASIDBITS_16: asid = 16; } @@ -77,13 +76,38 @@ void verify_cpu_asid_bits(void) } } +static void set_kpti_asid_bits(unsigned long *map) +{ + unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long); + /* + * In case of KPTI kernel/user ASIDs are allocated in + * pairs, the bottom bit distinguishes the two: if it + * is set, then the ASID will map only userspace. Thus + * mark even as reserved for kernel. + */ + memset(map, 0xaa, len); +} + +static void set_reserved_asid_bits(void) +{ + if (pinned_asid_map) + bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS); + else if (arm64_kernel_unmapped_at_el0()) + set_kpti_asid_bits(asid_map); + else + bitmap_clear(asid_map, 0, NUM_USER_ASIDS); +} + +#define asid_gen_match(asid) \ + (!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits)) + static void flush_context(void) { int i; u64 asid; /* Update the list of reserved ASIDs and the ASID bitmap. */ - bitmap_clear(asid_map, 0, NUM_USER_ASIDS); + set_reserved_asid_bits(); for_each_possible_cpu(i) { asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0); @@ -96,7 +120,7 @@ static void flush_context(void) */ if (asid == 0) asid = per_cpu(reserved_asids, i); - __set_bit(asid2idx(asid), asid_map); + __set_bit(ctxid2asid(asid), asid_map); per_cpu(reserved_asids, i) = asid; } @@ -138,7 +162,7 @@ static u64 new_context(struct mm_struct *mm) u64 generation = atomic64_read(&asid_generation); if (asid != 0) { - u64 newasid = generation | (asid & ~ASID_MASK); + u64 newasid = asid2ctxid(ctxid2asid(asid), generation); /* * If our current ASID was active during a rollover, we @@ -148,10 +172,18 @@ static u64 new_context(struct mm_struct *mm) return newasid; /* + * If it is pinned, we can keep using it. Note that reserved + * takes priority, because even if it is also pinned, we need to + * update the generation into the reserved_asids. + */ + if (refcount_read(&mm->context.pinned)) + return newasid; + + /* * We had a valid ASID in a previous life, so try to re-use * it if possible. 
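The new asid_gen_match() macro above tests whether a context ID belongs to the current generation by checking that everything above the low asid_bits agrees. A standalone model of that arithmetic (asid_bits fixed at 16 for the demo; the values are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	#define ASID_BITS 16UL

	/* Mirrors asid_gen_match(): generation lives above the low ASID bits. */
	static int asid_gen_match(uint64_t ctxid, uint64_t generation)
	{
		return !((ctxid ^ generation) >> ASID_BITS);
	}

	int main(void)
	{
		uint64_t gen = 3UL << ASID_BITS;	/* after three rollovers */
		uint64_t ctxid = gen | 0x42;		/* asid2ctxid(0x42, gen) */

		printf("current gen: %d\n", asid_gen_match(ctxid, gen));	/* 1 */
		printf("after rollover: %d\n",
		       asid_gen_match(ctxid, gen + (1UL << ASID_BITS)));	/* 0 */
		return 0;
	}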
*/ - if (!__test_and_set_bit(asid2idx(asid), asid_map)) + if (!__test_and_set_bit(ctxid2asid(asid), asid_map)) return newasid; } @@ -177,12 +209,13 @@ static u64 new_context(struct mm_struct *mm) set_asid: __set_bit(asid, asid_map); cur_idx = asid; - return idx2asid(asid) | generation; + return asid2ctxid(asid, generation); } -void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) +void check_and_switch_context(struct mm_struct *mm) { unsigned long flags; + unsigned int cpu; u64 asid, old_active_asid; if (system_supports_cnp()) @@ -204,25 +237,25 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) * relaxed xchg in flush_context will treat us as reserved * because atomic RmWs are totally ordered for a given location. */ - old_active_asid = atomic64_read(&per_cpu(active_asids, cpu)); - if (old_active_asid && - !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) && - atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu), + old_active_asid = atomic64_read(this_cpu_ptr(&active_asids)); + if (old_active_asid && asid_gen_match(asid) && + atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids), old_active_asid, asid)) goto switch_mm_fastpath; raw_spin_lock_irqsave(&cpu_asid_lock, flags); /* Check that our ASID belongs to the current generation. */ asid = atomic64_read(&mm->context.id); - if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) { + if (!asid_gen_match(asid)) { asid = new_context(mm); atomic64_set(&mm->context.id, asid); } + cpu = smp_processor_id(); if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) local_flush_tlb_all(); - atomic64_set(&per_cpu(active_asids, cpu), asid); + atomic64_set(this_cpu_ptr(&active_asids), asid); raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); switch_mm_fastpath: @@ -237,31 +270,153 @@ switch_mm_fastpath: cpu_switch_mm(mm->pgd, mm); } +unsigned long arm64_mm_context_get(struct mm_struct *mm) +{ + unsigned long flags; + u64 asid; + + if (!pinned_asid_map) + return 0; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + + asid = atomic64_read(&mm->context.id); + + if (refcount_inc_not_zero(&mm->context.pinned)) + goto out_unlock; + + if (nr_pinned_asids >= max_pinned_asids) { + asid = 0; + goto out_unlock; + } + + if (!asid_gen_match(asid)) { + /* + * We went through one or more rollover since that ASID was + * used. Ensure that it is still valid, or generate a new one. + */ + asid = new_context(mm); + atomic64_set(&mm->context.id, asid); + } + + nr_pinned_asids++; + __set_bit(ctxid2asid(asid), pinned_asid_map); + refcount_set(&mm->context.pinned, 1); + +out_unlock: + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + + asid = ctxid2asid(asid); + + /* Set the equivalent of USER_ASID_BIT */ + if (asid && arm64_kernel_unmapped_at_el0()) + asid |= 1; + + return asid; +} +EXPORT_SYMBOL_GPL(arm64_mm_context_get); + +void arm64_mm_context_put(struct mm_struct *mm) +{ + unsigned long flags; + u64 asid = atomic64_read(&mm->context.id); + + if (!pinned_asid_map) + return; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + + if (refcount_dec_and_test(&mm->context.pinned)) { + __clear_bit(ctxid2asid(asid), pinned_asid_map); + nr_pinned_asids--; + } + + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); +} +EXPORT_SYMBOL_GPL(arm64_mm_context_put); + /* Errata workaround post TTBRx_EL1 update. 
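arm64_mm_context_get()/arm64_mm_context_put() added above let a caller pin an mm's ASID so it survives rollover, which matters when hardware (for example an SMMU doing shared virtual addressing) caches the ASID. A hedged usage sketch; the bind/unbind helpers are illustrative:

	/* Hypothetical consumer: keep the ASID stable while a device uses it. */
	static int bind_mm_to_device(struct mm_struct *mm)
	{
		unsigned long asid = arm64_mm_context_get(mm);

		if (!asid)
			return -ENOSPC;	/* pinning unavailable or limit reached */

		/* ... program 'asid' into device tables ... */
		return 0;
	}

	static void unbind_mm_from_device(struct mm_struct *mm)
	{
		/* Balances the get; the ASID may be recycled afterwards. */
		arm64_mm_context_put(mm);
	}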
*/ asmlinkage void post_ttbr_update_workaround(void) { + if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) + return; + asm(ALTERNATIVE("nop; nop; nop", "ic iallu; dsb nsh; isb", - ARM64_WORKAROUND_CAVIUM_27456, - CONFIG_CAVIUM_ERRATUM_27456)); + ARM64_WORKAROUND_CAVIUM_27456)); } -static int asids_init(void) +void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm) { - asid_bits = get_cpu_asid_bits(); + unsigned long ttbr1 = read_sysreg(ttbr1_el1); + unsigned long asid = ASID(mm); + unsigned long ttbr0 = phys_to_ttbr(pgd_phys); + + /* Skip CNP for the reserved ASID */ + if (system_supports_cnp() && asid) + ttbr0 |= TTBR_CNP_BIT; + + /* SW PAN needs a copy of the ASID in TTBR0 for entry */ + if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN)) + ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid); + + /* Set ASID in TTBR1 since TCR.A1 is set */ + ttbr1 &= ~TTBR_ASID_MASK; + ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid); + + cpu_set_reserved_ttbr0_nosync(); + write_sysreg(ttbr1, ttbr1_el1); + write_sysreg(ttbr0, ttbr0_el1); + isb(); + post_ttbr_update_workaround(); +} + +static int asids_update_limit(void) +{ + unsigned long num_available_asids = NUM_USER_ASIDS; + + if (arm64_kernel_unmapped_at_el0()) { + num_available_asids /= 2; + if (pinned_asid_map) + set_kpti_asid_bits(pinned_asid_map); + } /* * Expect allocation after rollover to fail if we don't have at least * one more ASID than CPUs. ASID #0 is reserved for init_mm. */ - WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus()); + WARN_ON(num_available_asids - 1 <= num_possible_cpus()); + pr_info("ASID allocator initialised with %lu entries\n", + num_available_asids); + + /* + * There must always be an ASID available after rollover. Ensure that, + * even if all CPUs have a reserved ASID and the maximum number of ASIDs + * are pinned, there still is at least one empty slot in the ASID map. + */ + max_pinned_asids = num_available_asids - num_possible_cpus() - 2; + return 0; +} +arch_initcall(asids_update_limit); + +static int asids_init(void) +{ + asid_bits = get_cpu_asid_bits(); atomic64_set(&asid_generation, ASID_FIRST_VERSION); - asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map), - GFP_KERNEL); + asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL); if (!asid_map) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); - pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS); + pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL); + nr_pinned_asids = 0; + + /* + * We cannot call set_reserved_asid_bits() here because CPU + * caps are not finalized yet, so it is safer to assume KPTI + * and reserve kernel ASID's from beginning. + */ + if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) + set_kpti_asid_bits(asid_map); return 0; } early_initcall(asids_init); diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c new file mode 100644 index 000000000000..bcac4f55f9c1 --- /dev/null +++ b/arch/arm64/mm/contpte.c @@ -0,0 +1,443 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 ARM Ltd. + */ + +#include <linux/mm.h> +#include <linux/efi.h> +#include <linux/export.h> +#include <asm/tlbflush.h> + +static inline bool mm_is_user(struct mm_struct *mm) +{ + /* + * Don't attempt to apply the contig bit to kernel mappings, because + * dynamically adding/removing the contig bit can cause page faults. + * These racing faults are ok for user space, since they get serialized + * on the PTL. But kernel mappings can't tolerate faults. 
+ */ + if (unlikely(mm_is_efi(mm))) + return false; + return mm != &init_mm; +} + +static inline pte_t *contpte_align_down(pte_t *ptep) +{ + return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES); +} + +static void contpte_try_unfold_partial(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + /* + * Unfold any partially covered contpte block at the beginning and end + * of the range. + */ + + if (ptep != contpte_align_down(ptep) || nr < CONT_PTES) + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + + if (ptep + nr != contpte_align_down(ptep + nr)) { + unsigned long last_addr = addr + PAGE_SIZE * (nr - 1); + pte_t *last_ptep = ptep + nr - 1; + + contpte_try_unfold(mm, last_addr, last_ptep, + __ptep_get(last_ptep)); + } +} + +static void contpte_convert(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); + unsigned long start_addr; + pte_t *start_ptep; + int i; + + start_ptep = ptep = contpte_align_down(ptep); + start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + pte = pfn_pte(ALIGN_DOWN(pte_pfn(pte), CONT_PTES), pte_pgprot(pte)); + + for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) { + pte_t ptent = __ptep_get_and_clear(mm, addr, ptep); + + if (pte_dirty(ptent)) + pte = pte_mkdirty(pte); + + if (pte_young(ptent)) + pte = pte_mkyoung(pte); + } + + __flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3); + + __set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES); +} + +void __contpte_try_fold(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + /* + * We have already checked that the virtual and pysical addresses are + * correctly aligned for a contpte mapping in contpte_try_fold() so the + * remaining checks are to ensure that the contpte range is fully + * covered by a single folio, and ensure that all the ptes are valid + * with contiguous PFNs and matching prots. We ignore the state of the + * access and dirty bits for the purpose of deciding if its a contiguous + * range; the folding process will generate a single contpte entry which + * has a single access and dirty bit. Those 2 bits are the logical OR of + * their respective bits in the constituent pte entries. In order to + * ensure the contpte range is covered by a single folio, we must + * recover the folio from the pfn, but special mappings don't have a + * folio backing them. Fortunately contpte_try_fold() already checked + * that the pte is not special - we never try to fold special mappings. + * Note we can't use vm_normal_page() for this since we don't have the + * vma. 
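contpte_align_down() above rounds a pte pointer down to the first entry of its contiguous block; the same power-of-two arithmetic applies to the virtual address. A standalone sketch with illustrative constants (CONT_PTES = 16 for 4K pages):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define CONT_PTES	16ULL			/* 16 x 4K ptes = 64K block */
	#define CONT_PTE_SIZE	(CONT_PTES << PAGE_SHIFT)

	int main(void)
	{
		uint64_t addr = 0x7f1234567000ULL;	/* arbitrary user address */
		uint64_t block = addr & ~(CONT_PTE_SIZE - 1);	/* ALIGN_DOWN() */
		uint64_t idx = (addr >> PAGE_SHIFT) & (CONT_PTES - 1);

		printf("block base %#llx, entry %llu of %llu\n",
		       (unsigned long long)block, (unsigned long long)idx,
		       (unsigned long long)CONT_PTES);
		return 0;
	}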
+ */ + + unsigned long folio_start, folio_end; + unsigned long cont_start, cont_end; + pte_t expected_pte, subpte; + struct folio *folio; + struct page *page; + unsigned long pfn; + pte_t *orig_ptep; + pgprot_t prot; + + int i; + + if (!mm_is_user(mm)) + return; + + page = pte_page(pte); + folio = page_folio(page); + folio_start = addr - (page - &folio->page) * PAGE_SIZE; + folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE; + cont_start = ALIGN_DOWN(addr, CONT_PTE_SIZE); + cont_end = cont_start + CONT_PTE_SIZE; + + if (folio_start > cont_start || folio_end < cont_end) + return; + + pfn = ALIGN_DOWN(pte_pfn(pte), CONT_PTES); + prot = pte_pgprot(pte_mkold(pte_mkclean(pte))); + expected_pte = pfn_pte(pfn, prot); + orig_ptep = ptep; + ptep = contpte_align_down(ptep); + + for (i = 0; i < CONT_PTES; i++) { + subpte = pte_mkold(pte_mkclean(__ptep_get(ptep))); + if (!pte_same(subpte, expected_pte)) + return; + expected_pte = pte_advance_pfn(expected_pte, 1); + ptep++; + } + + pte = pte_mkcont(pte); + contpte_convert(mm, addr, orig_ptep, pte); +} +EXPORT_SYMBOL_GPL(__contpte_try_fold); + +void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + /* + * We have already checked that the ptes are contiguous in + * contpte_try_unfold(), so just check that the mm is user space. + */ + if (!mm_is_user(mm)) + return; + + pte = pte_mknoncont(pte); + contpte_convert(mm, addr, ptep, pte); +} +EXPORT_SYMBOL_GPL(__contpte_try_unfold); + +pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte) +{ + /* + * Gather access/dirty bits, which may be populated in any of the ptes + * of the contig range. We are guaranteed to be holding the PTL, so any + * contiguous range cannot be unfolded or otherwise modified under our + * feet. + */ + + pte_t pte; + int i; + + ptep = contpte_align_down(ptep); + + for (i = 0; i < CONT_PTES; i++, ptep++) { + pte = __ptep_get(ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} +EXPORT_SYMBOL_GPL(contpte_ptep_get); + +pte_t contpte_ptep_get_lockless(pte_t *orig_ptep) +{ + /* + * The ptep_get_lockless() API requires us to read and return *orig_ptep + * so that it is self-consistent, without the PTL held, so we may be + * racing with other threads modifying the pte. Usually a READ_ONCE() + * would suffice, but for the contpte case, we also need to gather the + * access and dirty bits from across all ptes in the contiguous block, + * and we can't read all of those neighbouring ptes atomically, so any + * contiguous range may be unfolded/modified/refolded under our feet. + * Therefore we ensure we read a _consistent_ contpte range by checking + * that all ptes in the range are valid and have CONT_PTE set, that all + * pfns are contiguous and that all pgprots are the same (ignoring + * access/dirty). If we find a pte that is not consistent, then we must + * be racing with an update so start again. If the target pte does not + * have CONT_PTE set then that is considered consistent on its own + * because it is not part of a contpte range. 
+ */ + + pgprot_t orig_prot; + unsigned long pfn; + pte_t orig_pte; + pgprot_t prot; + pte_t *ptep; + pte_t pte; + int i; + +retry: + orig_pte = __ptep_get(orig_ptep); + + if (!pte_valid_cont(orig_pte)) + return orig_pte; + + orig_prot = pte_pgprot(pte_mkold(pte_mkclean(orig_pte))); + ptep = contpte_align_down(orig_ptep); + pfn = pte_pfn(orig_pte) - (orig_ptep - ptep); + + for (i = 0; i < CONT_PTES; i++, ptep++, pfn++) { + pte = __ptep_get(ptep); + prot = pte_pgprot(pte_mkold(pte_mkclean(pte))); + + if (!pte_valid_cont(pte) || + pte_pfn(pte) != pfn || + pgprot_val(prot) != pgprot_val(orig_prot)) + goto retry; + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} +EXPORT_SYMBOL_GPL(contpte_ptep_get_lockless); + +void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned int nr) +{ + unsigned long next; + unsigned long end; + unsigned long pfn; + pgprot_t prot; + + /* + * The set_ptes() spec guarantees that when nr > 1, the initial state of + * all ptes is not-present. Therefore we never need to unfold or + * otherwise invalidate a range before we set the new ptes. + * contpte_set_ptes() should never be called for nr < 2. + */ + VM_WARN_ON(nr == 1); + + if (!mm_is_user(mm)) + return __set_ptes(mm, addr, ptep, pte, nr); + + end = addr + (nr << PAGE_SHIFT); + pfn = pte_pfn(pte); + prot = pte_pgprot(pte); + + do { + next = pte_cont_addr_end(addr, end); + nr = (next - addr) >> PAGE_SHIFT; + pte = pfn_pte(pfn, prot); + + if (((addr | next | (pfn << PAGE_SHIFT)) & ~CONT_PTE_MASK) == 0) + pte = pte_mkcont(pte); + else + pte = pte_mknoncont(pte); + + __set_ptes(mm, addr, ptep, pte, nr); + + addr = next; + ptep += nr; + pfn += nr; + + } while (addr != end); +} +EXPORT_SYMBOL_GPL(contpte_set_ptes); + +void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full) +{ + contpte_try_unfold_partial(mm, addr, ptep, nr); + __clear_full_ptes(mm, addr, ptep, nr, full); +} +EXPORT_SYMBOL_GPL(contpte_clear_full_ptes); + +pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned int nr, int full) +{ + contpte_try_unfold_partial(mm, addr, ptep, nr); + return __get_and_clear_full_ptes(mm, addr, ptep, nr, full); +} +EXPORT_SYMBOL_GPL(contpte_get_and_clear_full_ptes); + +int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + /* + * ptep_clear_flush_young() technically requires us to clear the access + * flag for a _single_ pte. However, the core-mm code actually tracks + * access/dirty per folio, not per page. And since we only create a + * contig range when the range is covered by a single folio, we can get + * away with clearing young for the whole contig range here, so we avoid + * having to unfold. + */ + + int young = 0; + int i; + + ptep = contpte_align_down(ptep); + addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + + for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) + young |= __ptep_test_and_clear_young(vma, addr, ptep); + + return young; +} +EXPORT_SYMBOL_GPL(contpte_ptep_test_and_clear_young); + +int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + int young; + + young = contpte_ptep_test_and_clear_young(vma, addr, ptep); + + if (young) { + /* + * See comment in __ptep_clear_flush_young(); same rationale for + * eliding the trailing DSB applies here. 
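contpte_set_ptes() above walks the range in pte_cont_addr_end() chunks and sets PTE_CONT only when a chunk's start, end and pfn are all aligned to a full block. A standalone model of that loop; the constants and sample range are illustrative:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define CONT_PTES	16ULL
	#define CONT_PTE_SIZE	(CONT_PTES << PAGE_SHIFT)
	#define CONT_PTE_MASK	(~(CONT_PTE_SIZE - 1))

	static uint64_t pte_cont_addr_end(uint64_t addr, uint64_t end)
	{
		uint64_t boundary = (addr + CONT_PTE_SIZE) & CONT_PTE_MASK;

		return boundary < end ? boundary : end;
	}

	int main(void)
	{
		uint64_t addr = 0x40003000ULL, pfn = 0x40003ULL;	/* unaligned start */
		uint64_t end = addr + (40ULL << PAGE_SHIFT);		/* 40 pages */

		while (addr != end) {
			uint64_t next = pte_cont_addr_end(addr, end);
			int cont = ((addr | next | (pfn << PAGE_SHIFT)) &
				    ~CONT_PTE_MASK) == 0;

			printf("[%#llx, %#llx) -> %s\n", (unsigned long long)addr,
			       (unsigned long long)next, cont ? "PTE_CONT" : "plain");
			pfn += (next - addr) >> PAGE_SHIFT;
			addr = next;
		}
		return 0;
	}

Only the middle, fully aligned 64K chunk of the sample range gets PTE_CONT; the ragged head and tail stay plain, matching the fold/unfold rules above.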
+ */ + addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + __flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE, + PAGE_SIZE, true, 3); + } + + return young; +} +EXPORT_SYMBOL_GPL(contpte_ptep_clear_flush_young); + +void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + /* + * If wrprotecting an entire contig range, we can avoid unfolding. Just + * set wrprotect and wait for the later mmu_gather flush to invalidate + * the tlb. Until the flush, the page may or may not be wrprotected. + * After the flush, it is guaranteed wrprotected. If it's a partial + * range though, we must unfold, because we can't have a case where + * CONT_PTE is set but wrprotect applies to a subset of the PTEs; this + * would cause it to continue to be unpredictable after the flush. + */ + + contpte_try_unfold_partial(mm, addr, ptep, nr); + __wrprotect_ptes(mm, addr, ptep, nr); +} +EXPORT_SYMBOL_GPL(contpte_wrprotect_ptes); + +void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + unsigned int nr, cydp_t flags) +{ + /* + * We can safely clear access/dirty without needing to unfold from + * the architectures perspective, even when contpte is set. If the + * range starts or ends midway through a contpte block, we can just + * expand to include the full contpte block. While this is not + * exactly what the core-mm asked for, it tracks access/dirty per + * folio, not per page. And since we only create a contpte block + * when it is covered by a single folio, we can get away with + * clearing access/dirty for the whole block. + */ + unsigned long start = addr; + unsigned long end = start + nr * PAGE_SIZE; + + if (pte_cont(__ptep_get(ptep + nr - 1))) + end = ALIGN(end, CONT_PTE_SIZE); + + if (pte_cont(__ptep_get(ptep))) { + start = ALIGN_DOWN(start, CONT_PTE_SIZE); + ptep = contpte_align_down(ptep); + } + + __clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags); +} +EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes); + +int contpte_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t entry, int dirty) +{ + unsigned long start_addr; + pte_t orig_pte; + int i; + + /* + * Gather the access/dirty bits for the contiguous range. If nothing has + * changed, its a noop. + */ + orig_pte = pte_mknoncont(ptep_get(ptep)); + if (pte_val(orig_pte) == pte_val(entry)) + return 0; + + /* + * We can fix up access/dirty bits without having to unfold the contig + * range. But if the write bit is changing, we must unfold. + */ + if (pte_write(orig_pte) == pte_write(entry)) { + /* + * For HW access management, we technically only need to update + * the flag on a single pte in the range. But for SW access + * management, we need to update all the ptes to prevent extra + * faults. Avoid per-page tlb flush in __ptep_set_access_flags() + * and instead flush the whole range at the end. + */ + ptep = contpte_align_down(ptep); + start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + + /* + * We are not advancing entry because __ptep_set_access_flags() + * only consumes access flags from entry. And since we have checked + * for the whole contpte block and returned early, pte_same() + * within __ptep_set_access_flags() is likely false. 
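contpte_clear_young_dirty_ptes() above widens a partial range to whole contpte blocks when its first or last pte carries PTE_CONT, since access/dirty are tracked per folio anyway. A standalone sketch of that widening (constants and sample range illustrative):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE	4096ULL
	#define CONT_PTE_SIZE	(16 * PAGE_SIZE)

	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		uint64_t start = 0x40005000ULL;		/* 3 ptes, mid-block */
		uint64_t end = start + 3 * PAGE_SIZE;

		/* Assume the boundary ptes carry PTE_CONT, as checked above. */
		start = ALIGN_DOWN(start, CONT_PTE_SIZE);
		end = ALIGN(end, CONT_PTE_SIZE);

		printf("widened to [%#llx, %#llx): %llu ptes\n",
		       (unsigned long long)start, (unsigned long long)end,
		       (unsigned long long)((end - start) / PAGE_SIZE));
		return 0;
	}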
+ */ + for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) + __ptep_set_access_flags(vma, addr, ptep, entry, 0); + + if (dirty) + __flush_tlb_range(vma, start_addr, addr, + PAGE_SIZE, true, 3); + } else { + __contpte_try_unfold(vma->vm_mm, addr, ptep, orig_pte); + __ptep_set_access_flags(vma, addr, ptep, entry, dirty); + } + + return 1; +} +EXPORT_SYMBOL_GPL(contpte_ptep_set_access_flags); diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index 2ee7b73433a5..a86c897017df 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -6,21 +6,64 @@ * Copyright (C) 2012 ARM Ltd. */ +#include <linux/bitops.h> #include <linux/mm.h> #include <asm/page.h> #include <asm/cacheflush.h> +#include <asm/cpufeature.h> +#include <asm/mte.h> -void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +void copy_highpage(struct page *to, struct page *from) { - struct page *page = virt_to_page(kto); + void *kto = page_address(to); + void *kfrom = page_address(from); + struct folio *src = page_folio(from); + struct folio *dst = page_folio(to); + unsigned int i, nr_pages; + copy_page(kto, kfrom); - flush_dcache_page(page); + + if (kasan_hw_tags_enabled()) + page_kasan_tag_reset(to); + + if (!system_supports_mte()) + return; + + if (folio_test_hugetlb(src)) { + if (!folio_test_hugetlb_mte_tagged(src) || + from != folio_page(src, 0)) + return; + + WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst)); + + /* + * Populate tags for all subpages. + * + * Don't assume the first page is head page since + * huge page copy may start from any subpage. + */ + nr_pages = folio_nr_pages(src); + for (i = 0; i < nr_pages; i++) { + kfrom = page_address(folio_page(src, i)); + kto = page_address(folio_page(dst, i)); + mte_copy_page_tags(kto, kfrom); + } + folio_set_hugetlb_mte_tagged(dst); + } else if (page_mte_tagged(from)) { + /* It's a new page, shouldn't have been tagged yet */ + WARN_ON_ONCE(!try_page_mte_tagging(to)); + + mte_copy_page_tags(kto, kfrom); + set_page_mte_tagged(to); + } } -EXPORT_SYMBOL_GPL(__cpu_copy_user_page); +EXPORT_SYMBOL(copy_highpage); -void __cpu_clear_user_page(void *kaddr, unsigned long vaddr) +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) { - clear_page(kaddr); + copy_highpage(to, from); + flush_dcache_page(to); } -EXPORT_SYMBOL_GPL(__cpu_clear_user_page); +EXPORT_SYMBOL_GPL(copy_user_highpage); diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 6c45350e33aa..b2b5792b2caa 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -6,39 +6,39 @@ #include <linux/gfp.h> #include <linux/cache.h> -#include <linux/dma-noncoherent.h> -#include <linux/dma-iommu.h> +#include <linux/dma-map-ops.h> #include <xen/xen.h> -#include <xen/swiotlb-xen.h> #include <asm/cacheflush.h> +#include <asm/xen/xen-ops.h> void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, - enum dma_data_direction dir) + enum dma_data_direction dir) { - __dma_map_area(phys_to_virt(paddr), size, dir); + unsigned long start = (unsigned long)phys_to_virt(paddr); + + dcache_clean_poc(start, start + size); } void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, - enum dma_data_direction dir) + enum dma_data_direction dir) { - __dma_unmap_area(phys_to_virt(paddr), size, dir); + unsigned long start = (unsigned long)phys_to_virt(paddr); + + if (dir == DMA_TO_DEVICE) + return; + + dcache_inval_poc(start, start + size); } void arch_dma_prep_coherent(struct page *page, size_t size) 
{ - __dma_flush_area(page_address(page), size); -} + unsigned long start = (unsigned long)page_address(page); -#ifdef CONFIG_IOMMU_DMA -void arch_teardown_dma_ops(struct device *dev) -{ - dev->dma_ops = NULL; + dcache_clean_poc(start, start + size); } -#endif -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - const struct iommu_ops *iommu, bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { int cls = cache_line_size_of_cpu(); @@ -49,11 +49,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, ARCH_DMA_MINALIGN, cls); dev->dma_coherent = coherent; - if (iommu) - iommu_setup_dma_ops(dev, dma_base, size); -#ifdef CONFIG_XEN - if (xen_initial_domain()) - dev->dma_ops = &xen_swiotlb_dma_ops; -#endif + xen_setup_dma_ops(dev); } diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c deleted file mode 100644 index 0a920b538a89..000000000000 --- a/arch/arm64/mm/dump.c +++ /dev/null @@ -1,423 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2014, The Linux Foundation. All rights reserved. - * Debug helper to dump the current kernel pagetables of the system - * so that we can see what the various memory ranges are set to. - * - * Derived from x86 and arm implementation: - * (C) Copyright 2008 Intel Corporation - * - * Author: Arjan van de Ven <arjan@linux.intel.com> - */ -#include <linux/debugfs.h> -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/io.h> -#include <linux/init.h> -#include <linux/mm.h> -#include <linux/sched.h> -#include <linux/seq_file.h> - -#include <asm/fixmap.h> -#include <asm/kasan.h> -#include <asm/memory.h> -#include <asm/pgtable.h> -#include <asm/pgtable-hwdef.h> -#include <asm/ptdump.h> - - -enum address_markers_idx { - PAGE_OFFSET_NR = 0, - PAGE_END_NR, -#ifdef CONFIG_KASAN - KASAN_START_NR, -#endif -}; - -static struct addr_marker address_markers[] = { - { PAGE_OFFSET, "Linear Mapping start" }, - { 0 /* PAGE_END */, "Linear Mapping end" }, -#ifdef CONFIG_KASAN - { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, - { KASAN_SHADOW_END, "Kasan shadow end" }, -#endif - { MODULES_VADDR, "Modules start" }, - { MODULES_END, "Modules end" }, - { VMALLOC_START, "vmalloc() area" }, - { VMALLOC_END, "vmalloc() end" }, - { FIXADDR_START, "Fixmap start" }, - { FIXADDR_TOP, "Fixmap end" }, - { PCI_IO_START, "PCI I/O start" }, - { PCI_IO_END, "PCI I/O end" }, -#ifdef CONFIG_SPARSEMEM_VMEMMAP - { VMEMMAP_START, "vmemmap start" }, - { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" }, -#endif - { -1, NULL }, -}; - -#define pt_dump_seq_printf(m, fmt, args...) \ -({ \ - if (m) \ - seq_printf(m, fmt, ##args); \ -}) - -#define pt_dump_seq_puts(m, fmt) \ -({ \ - if (m) \ - seq_printf(m, fmt); \ -}) - -/* - * The page dumper groups page table entries of the same type into a single - * description. It uses pg_state to track the range information while - * iterating over the pte entries. When the continuity is broken it then - * dumps out a description of the range. 
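Stepping back to the dma-mapping.c hunk above: the direction handling reduces to "clean before the device looks, invalidate after it may have written". A hedged restatement using the new helpers; the wrapper names here are illustrative:

	static void sync_for_device(unsigned long start, unsigned long end)
	{
		dcache_clean_poc(start, end);	/* push dirty lines to PoC */
	}

	static void sync_for_cpu(unsigned long start, unsigned long end,
				 bool dir_is_to_device)
	{
		if (dir_is_to_device)
			return;			/* device only read the buffer */
		dcache_inval_poc(start, end);	/* drop lines the device wrote under */
	}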
- */ -struct pg_state { - struct seq_file *seq; - const struct addr_marker *marker; - unsigned long start_address; - unsigned level; - u64 current_prot; - bool check_wx; - unsigned long wx_pages; - unsigned long uxn_pages; -}; - -struct prot_bits { - u64 mask; - u64 val; - const char *set; - const char *clear; -}; - -static const struct prot_bits pte_bits[] = { - { - .mask = PTE_VALID, - .val = PTE_VALID, - .set = " ", - .clear = "F", - }, { - .mask = PTE_USER, - .val = PTE_USER, - .set = "USR", - .clear = " ", - }, { - .mask = PTE_RDONLY, - .val = PTE_RDONLY, - .set = "ro", - .clear = "RW", - }, { - .mask = PTE_PXN, - .val = PTE_PXN, - .set = "NX", - .clear = "x ", - }, { - .mask = PTE_SHARED, - .val = PTE_SHARED, - .set = "SHD", - .clear = " ", - }, { - .mask = PTE_AF, - .val = PTE_AF, - .set = "AF", - .clear = " ", - }, { - .mask = PTE_NG, - .val = PTE_NG, - .set = "NG", - .clear = " ", - }, { - .mask = PTE_CONT, - .val = PTE_CONT, - .set = "CON", - .clear = " ", - }, { - .mask = PTE_TABLE_BIT, - .val = PTE_TABLE_BIT, - .set = " ", - .clear = "BLK", - }, { - .mask = PTE_UXN, - .val = PTE_UXN, - .set = "UXN", - .clear = " ", - }, { - .mask = PTE_ATTRINDX_MASK, - .val = PTE_ATTRINDX(MT_DEVICE_nGnRnE), - .set = "DEVICE/nGnRnE", - }, { - .mask = PTE_ATTRINDX_MASK, - .val = PTE_ATTRINDX(MT_DEVICE_nGnRE), - .set = "DEVICE/nGnRE", - }, { - .mask = PTE_ATTRINDX_MASK, - .val = PTE_ATTRINDX(MT_DEVICE_GRE), - .set = "DEVICE/GRE", - }, { - .mask = PTE_ATTRINDX_MASK, - .val = PTE_ATTRINDX(MT_NORMAL_NC), - .set = "MEM/NORMAL-NC", - }, { - .mask = PTE_ATTRINDX_MASK, - .val = PTE_ATTRINDX(MT_NORMAL), - .set = "MEM/NORMAL", - } -}; - -struct pg_level { - const struct prot_bits *bits; - const char *name; - size_t num; - u64 mask; -}; - -static struct pg_level pg_level[] = { - { - }, { /* pgd */ - .name = "PGD", - .bits = pte_bits, - .num = ARRAY_SIZE(pte_bits), - }, { /* pud */ - .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD", - .bits = pte_bits, - .num = ARRAY_SIZE(pte_bits), - }, { /* pmd */ - .name = (CONFIG_PGTABLE_LEVELS > 2) ? 
"PMD" : "PGD", - .bits = pte_bits, - .num = ARRAY_SIZE(pte_bits), - }, { /* pte */ - .name = "PTE", - .bits = pte_bits, - .num = ARRAY_SIZE(pte_bits), - }, -}; - -static void dump_prot(struct pg_state *st, const struct prot_bits *bits, - size_t num) -{ - unsigned i; - - for (i = 0; i < num; i++, bits++) { - const char *s; - - if ((st->current_prot & bits->mask) == bits->val) - s = bits->set; - else - s = bits->clear; - - if (s) - pt_dump_seq_printf(st->seq, " %s", s); - } -} - -static void note_prot_uxn(struct pg_state *st, unsigned long addr) -{ - if (!st->check_wx) - return; - - if ((st->current_prot & PTE_UXN) == PTE_UXN) - return; - - WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n", - (void *)st->start_address, (void *)st->start_address); - - st->uxn_pages += (addr - st->start_address) / PAGE_SIZE; -} - -static void note_prot_wx(struct pg_state *st, unsigned long addr) -{ - if (!st->check_wx) - return; - if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY) - return; - if ((st->current_prot & PTE_PXN) == PTE_PXN) - return; - - WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n", - (void *)st->start_address, (void *)st->start_address); - - st->wx_pages += (addr - st->start_address) / PAGE_SIZE; -} - -static void note_page(struct pg_state *st, unsigned long addr, unsigned level, - u64 val) -{ - static const char units[] = "KMGTPE"; - u64 prot = val & pg_level[level].mask; - - if (!st->level) { - st->level = level; - st->current_prot = prot; - st->start_address = addr; - pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); - } else if (prot != st->current_prot || level != st->level || - addr >= st->marker[1].start_address) { - const char *unit = units; - unsigned long delta; - - if (st->current_prot) { - note_prot_uxn(st, addr); - note_prot_wx(st, addr); - pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ", - st->start_address, addr); - - delta = (addr - st->start_address) >> 10; - while (!(delta & 1023) && unit[1]) { - delta >>= 10; - unit++; - } - pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit, - pg_level[st->level].name); - if (pg_level[st->level].bits) - dump_prot(st, pg_level[st->level].bits, - pg_level[st->level].num); - pt_dump_seq_puts(st->seq, "\n"); - } - - if (addr >= st->marker[1].start_address) { - st->marker++; - pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); - } - - st->start_address = addr; - st->current_prot = prot; - st->level = level; - } - - if (addr >= st->marker[1].start_address) { - st->marker++; - pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); - } - -} - -static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start, - unsigned long end) -{ - unsigned long addr = start; - pte_t *ptep = pte_offset_kernel(pmdp, start); - - do { - note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); - } while (ptep++, addr += PAGE_SIZE, addr != end); -} - -static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start, - unsigned long end) -{ - unsigned long next, addr = start; - pmd_t *pmdp = pmd_offset(pudp, start); - - do { - pmd_t pmd = READ_ONCE(*pmdp); - next = pmd_addr_end(addr, end); - - if (pmd_none(pmd) || pmd_sect(pmd)) { - note_page(st, addr, 3, pmd_val(pmd)); - } else { - BUG_ON(pmd_bad(pmd)); - walk_pte(st, pmdp, addr, next); - } - } while (pmdp++, addr = next, addr != end); -} - -static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start, - unsigned long end) -{ - unsigned long next, addr = start; - pud_t *pudp = pud_offset(pgdp, start); - - do 
{ - pud_t pud = READ_ONCE(*pudp); - next = pud_addr_end(addr, end); - - if (pud_none(pud) || pud_sect(pud)) { - note_page(st, addr, 2, pud_val(pud)); - } else { - BUG_ON(pud_bad(pud)); - walk_pmd(st, pudp, addr, next); - } - } while (pudp++, addr = next, addr != end); -} - -static void walk_pgd(struct pg_state *st, struct mm_struct *mm, - unsigned long start) -{ - unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0; - unsigned long next, addr = start; - pgd_t *pgdp = pgd_offset(mm, start); - - do { - pgd_t pgd = READ_ONCE(*pgdp); - next = pgd_addr_end(addr, end); - - if (pgd_none(pgd)) { - note_page(st, addr, 1, pgd_val(pgd)); - } else { - BUG_ON(pgd_bad(pgd)); - walk_pud(st, pgdp, addr, next); - } - } while (pgdp++, addr = next, addr != end); -} - -void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) -{ - struct pg_state st = { - .seq = m, - .marker = info->markers, - }; - - walk_pgd(&st, info->mm, info->base_addr); - - note_page(&st, 0, 0, 0); -} - -static void ptdump_initialize(void) -{ - unsigned i, j; - - for (i = 0; i < ARRAY_SIZE(pg_level); i++) - if (pg_level[i].bits) - for (j = 0; j < pg_level[i].num; j++) - pg_level[i].mask |= pg_level[i].bits[j].mask; -} - -static struct ptdump_info kernel_ptdump_info = { - .mm = &init_mm, - .markers = address_markers, - .base_addr = PAGE_OFFSET, -}; - -void ptdump_check_wx(void) -{ - struct pg_state st = { - .seq = NULL, - .marker = (struct addr_marker[]) { - { 0, NULL}, - { -1, NULL}, - }, - .check_wx = true, - }; - - walk_pgd(&st, &init_mm, PAGE_OFFSET); - note_page(&st, 0, 0, 0); - if (st.wx_pages || st.uxn_pages) - pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", - st.wx_pages, st.uxn_pages); - else - pr_info("Checked W+X mappings: passed, no W+X pages found\n"); -} - -static int ptdump_init(void) -{ - address_markers[PAGE_END_NR].start_address = PAGE_END; -#ifdef CONFIG_KASAN - address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START; -#endif - ptdump_initialize(); - ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); - return 0; -} -device_initcall(ptdump_init); diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 81e694af5f8c..6e0528831cd3 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -3,16 +3,114 @@ * Based on arch/arm/mm/extable.c */ +#include <linux/bitfield.h> #include <linux/extable.h> #include <linux/uaccess.h> -int fixup_exception(struct pt_regs *regs) +#include <asm/asm-extable.h> +#include <asm/esr.h> +#include <asm/ptrace.h> + +static bool cpy_faulted_on_uaccess(const struct exception_table_entry *ex, + unsigned long esr) +{ + bool uaccess_is_write = FIELD_GET(EX_DATA_UACCESS_WRITE, ex->data); + bool fault_on_write = esr & ESR_ELx_WNR; + + return uaccess_is_write == fault_on_write; +} + +bool insn_may_access_user(unsigned long addr, unsigned long esr) +{ + const struct exception_table_entry *ex = search_exception_tables(addr); + + if (!ex) + return false; + + switch (ex->type) { + case EX_TYPE_UACCESS_CPY: + return cpy_faulted_on_uaccess(ex, esr); + default: + return true; + } +} + +static inline unsigned long +get_ex_fixup(const struct exception_table_entry *ex) +{ + return ((unsigned long)&ex->fixup + ex->fixup); +} + +static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex, + struct pt_regs *regs) +{ + int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); + int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data); + + pt_regs_write_reg(regs, reg_err, -EFAULT); + 
pt_regs_write_reg(regs, reg_zero, 0); + + regs->pc = get_ex_fixup(ex); + return true; +} + +static bool ex_handler_uaccess_cpy(const struct exception_table_entry *ex, + struct pt_regs *regs, unsigned long esr) +{ + /* Do not fix up faults on kernel memory accesses */ + if (!cpy_faulted_on_uaccess(ex, esr)) + return false; + + regs->pc = get_ex_fixup(ex); + return true; +} + +static bool +ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex, + struct pt_regs *regs) +{ + int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data); + int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data); + unsigned long data, addr, offset; + + addr = pt_regs_read_reg(regs, reg_addr); + + offset = addr & 0x7UL; + addr &= ~0x7UL; + + data = *(unsigned long*)addr; + +#ifndef __AARCH64EB__ + data >>= 8 * offset; +#else + data <<= 8 * offset; +#endif + + pt_regs_write_reg(regs, reg_data, data); + + regs->pc = get_ex_fixup(ex); + return true; +} + +bool fixup_exception(struct pt_regs *regs, unsigned long esr) { - const struct exception_table_entry *fixup; + const struct exception_table_entry *ex; + + ex = search_exception_tables(instruction_pointer(regs)); + if (!ex) + return false; - fixup = search_exception_tables(instruction_pointer(regs)); - if (fixup) - regs->pc = (unsigned long)&fixup->fixup + fixup->fixup; + switch (ex->type) { + case EX_TYPE_BPF: + return ex_handler_bpf(ex, regs); + case EX_TYPE_UACCESS_ERR_ZERO: + case EX_TYPE_KACCESS_ERR_ZERO: + return ex_handler_uaccess_err_zero(ex, regs); + case EX_TYPE_UACCESS_CPY: + return ex_handler_uaccess_cpy(ex, regs, esr); + case EX_TYPE_LOAD_UNALIGNED_ZEROPAD: + return ex_handler_load_unaligned_zeropad(ex, regs); + } - return fixup != NULL; + BUG(); } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 077b02a2d4d3..ec0a337891dd 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -10,10 +10,12 @@ #include <linux/acpi.h> #include <linux/bitfield.h> #include <linux/extable.h> +#include <linux/kfence.h> #include <linux/signal.h> #include <linux/mm.h> #include <linux/hardirq.h> #include <linux/init.h> +#include <linux/kasan.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/page-flags.h> @@ -21,6 +23,7 @@ #include <linux/sched/debug.h> #include <linux/highmem.h> #include <linux/perf_event.h> +#include <linux/pkeys.h> #include <linux/preempt.h> #include <linux/hugetlb.h> @@ -28,20 +31,21 @@ #include <asm/bug.h> #include <asm/cmpxchg.h> #include <asm/cpufeature.h> +#include <asm/efi.h> #include <asm/exception.h> #include <asm/daifflags.h> #include <asm/debug-monitors.h> #include <asm/esr.h> #include <asm/kprobes.h> +#include <asm/mte.h> #include <asm/processor.h> #include <asm/sysreg.h> #include <asm/system_misc.h> -#include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/traps.h> struct fault_info { - int (*fn)(unsigned long addr, unsigned int esr, + int (*fn)(unsigned long far, unsigned long esr, struct pt_regs *regs); int sig; int code; @@ -51,18 +55,20 @@ struct fault_info { static const struct fault_info fault_info[]; static struct fault_info debug_fault_info[]; -static inline const struct fault_info *esr_to_fault_info(unsigned int esr) +static inline const struct fault_info *esr_to_fault_info(unsigned long esr) { return fault_info + (esr & ESR_ELx_FSC); } -static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr) +static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr) { return debug_fault_info + DBG_ESR_EVT(esr); } -static void 
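ex_handler_load_unaligned_zeropad() above recovers from a faulting unaligned load by redoing it aligned and shifting the wanted bytes into place, zero-filling the rest. A standalone little-endian model of that arithmetic (buffer and offset illustrative):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint64_t storage[2] = { 0 };		/* 8-byte aligned backing */
		char *buf = (char *)storage;
		uintptr_t addr;
		uint64_t data, offset;

		memcpy(buf, "ABCDEFGH", 8);
		addr = (uintptr_t)buf + 5;		/* unaligned access point */
		offset = addr & 0x7;			/* 5, given aligned storage */

		/* Redo the load aligned, then shift like the LE fixup path. */
		memcpy(&data, (void *)(addr & ~(uintptr_t)0x7), sizeof(data));
		data >>= 8 * offset;			/* keeps 'FGH', zero-fills above */

		printf("offset %llu, data %#llx\n",
		       (unsigned long long)offset, (unsigned long long)data);
		return 0;
	}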
data_abort_decode(unsigned int esr) +static void data_abort_decode(unsigned long esr) { + unsigned long iss2 = ESR_ELx_ISS2(esr); + pr_alert("Data abort info:\n"); if (esr & ESR_ELx_ISV) { @@ -75,19 +81,28 @@ static void data_abort_decode(unsigned int esr) (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT, (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); } else { - pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK); + pr_alert(" ISV = 0, ISS = 0x%08lx, ISS2 = 0x%08lx\n", + esr & ESR_ELx_ISS_MASK, iss2); } - pr_alert(" CM = %lu, WnR = %lu\n", + pr_alert(" CM = %lu, WnR = %lu, TnD = %lu, TagAccess = %lu\n", (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT, - (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT); + (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT, + (iss2 & ESR_ELx_TnD) >> ESR_ELx_TnD_SHIFT, + (iss2 & ESR_ELx_TagAccess) >> ESR_ELx_TagAccess_SHIFT); + + pr_alert(" GCS = %ld, Overlay = %lu, DirtyBit = %lu, Xs = %llu\n", + (iss2 & ESR_ELx_GCS) >> ESR_ELx_GCS_SHIFT, + (iss2 & ESR_ELx_Overlay) >> ESR_ELx_Overlay_SHIFT, + (iss2 & ESR_ELx_DirtyBit) >> ESR_ELx_DirtyBit_SHIFT, + (iss2 & ESR_ELx_Xs_MASK) >> ESR_ELx_Xs_SHIFT); } -static void mem_abort_decode(unsigned int esr) +static void mem_abort_decode(unsigned long esr) { pr_alert("Mem abort info:\n"); - pr_alert(" ESR = 0x%08x\n", esr); + pr_alert(" ESR = 0x%016lx\n", esr); pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n", ESR_ELx_EC(esr), esr_get_class_string(esr), (esr & ESR_ELx_IL) ? 32 : 16); @@ -97,6 +112,8 @@ static void mem_abort_decode(unsigned int esr) pr_alert(" EA = %lu, S1PTW = %lu\n", (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT, (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT); + pr_alert(" FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC), + esr_to_fault_info(esr)->name); if (esr_is_data_abort(esr)) data_abort_decode(esr); @@ -145,6 +162,7 @@ static void show_pte(unsigned long addr) pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd)); do { + p4d_t *p4dp, p4d; pud_t *pudp, pud; pmd_t *pmdp, pmd; pte_t *ptep, pte; @@ -152,7 +170,13 @@ static void show_pte(unsigned long addr) if (pgd_none(pgd) || pgd_bad(pgd)) break; - pudp = pud_offset(pgdp, addr); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + pr_cont(", p4d=%016llx", p4d_val(p4d)); + if (p4d_none(p4d) || p4d_bad(p4d)) + break; + + pudp = pud_offset(p4dp, addr); pud = READ_ONCE(*pudp); pr_cont(", pud=%016llx", pud_val(pud)); if (pud_none(pud) || pud_bad(pud)) @@ -165,7 +189,10 @@ static void show_pte(unsigned long addr) break; ptep = pte_offset_map(pmdp, addr); - pte = READ_ONCE(*ptep); + if (!ptep) + break; + + pte = __ptep_get(ptep); pr_cont(", pte=%016llx", pte_val(pte)); pte_unmap(ptep); } while(0); @@ -179,16 +206,16 @@ static void show_pte(unsigned long addr) * * It needs to cope with hardware update of the accessed/dirty state by other * agents in the system and can safely skip the __sync_icache_dcache() call as, - * like set_pte_at(), the PTE is never changed from no-exec to exec here. + * like __set_ptes(), the PTE is never changed from no-exec to exec here. * * Returns whether or not the PTE actually changed. 
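The decode helpers above pull individual fields out of the ESR with mask-and-shift pairs (FIELD_GET in the kernel). A standalone model using the architectural EC [31:26], IL [25] and FSC [5:0] positions; the sample ESR value is illustrative (a typical EL0 write translation fault):

	#include <stdio.h>
	#include <stdint.h>

	#define ESR_EC_MASK	(0x3FUL << 26)	/* exception class */
	#define ESR_IL		(1UL << 25)	/* 32-bit instruction */
	#define ESR_FSC_MASK	0x3FUL		/* fault status code */

	static uint64_t field_get(uint64_t mask, uint64_t esr)
	{
		/* FIELD_GET(): shift the masked bits down to bit 0. */
		return (esr & mask) >> __builtin_ctzll(mask);
	}

	int main(void)
	{
		uint64_t esr = 0x92000045;	/* illustrative sample value */

		printf("EC  = %#llx\n", (unsigned long long)field_get(ESR_EC_MASK, esr));
		printf("IL  = %llu bits\n", field_get(ESR_IL, esr) ? 32ULL : 16ULL);
		printf("FSC = %#llx\n", (unsigned long long)field_get(ESR_FSC_MASK, esr));
		return 0;
	}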
*/ -int ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep, - pte_t entry, int dirty) +int __ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty) { pteval_t old_pteval, pteval; - pte_t pte = READ_ONCE(*ptep); + pte_t pte = __ptep_get(ptep); if (pte_same(pte, entry)) return 0; @@ -212,49 +239,52 @@ int ptep_set_access_flags(struct vm_area_struct *vma, pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval); } while (pteval != old_pteval); - flush_tlb_fix_spurious_fault(vma, address); + /* Invalidate a stale read-only entry */ + if (dirty) + flush_tlb_page(vma, address); return 1; } -static bool is_el1_instruction_abort(unsigned int esr) +static bool is_el1_instruction_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; } -static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, - struct pt_regs *regs) +static bool is_el1_data_abort(unsigned long esr) { - unsigned int ec = ESR_ELx_EC(esr); - unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; + return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR; +} - if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR) +static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr, + struct pt_regs *regs) +{ + if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr)) return false; - if (fsc_type == ESR_ELx_FSC_PERM) + if (esr_fsc_is_permission_fault(esr)) return true; if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan()) - return fsc_type == ESR_ELx_FSC_FAULT && + return esr_fsc_is_translation_fault(esr) && (regs->pstate & PSR_PAN_BIT); return false; } static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, - unsigned int esr, + unsigned long esr, struct pt_regs *regs) { unsigned long flags; u64 par, dfsc; - if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR || - (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT) + if (!is_el1_data_abort(esr) || !esr_fsc_is_translation_fault(esr)) return false; local_irq_save(flags); asm volatile("at s1e1r, %0" :: "r" (addr)); isb(); - par = read_sysreg(par_el1); + par = read_sysreg_par(); local_irq_restore(flags); /* @@ -269,26 +299,74 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, * treat the translation fault as spurious. */ dfsc = FIELD_GET(SYS_PAR_EL1_FST, par); - return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT; + return !esr_fsc_is_translation_fault(dfsc); } static void die_kernel_fault(const char *msg, unsigned long addr, - unsigned int esr, struct pt_regs *regs) + unsigned long esr, struct pt_regs *regs) { bust_spinlocks(1); pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg, addr); + kasan_non_canonical_hook(addr); + mem_abort_decode(esr); show_pte(addr); die("Oops", regs, esr); bust_spinlocks(0); - do_exit(SIGKILL); + make_task_dead(SIGKILL); +} + +#ifdef CONFIG_KASAN_HW_TAGS +static void report_tag_fault(unsigned long addr, unsigned long esr, + struct pt_regs *regs) +{ + /* + * SAS bits aren't set for all faults reported in EL1, so we can't + * find out access size. + */ + bool is_write = !!(esr & ESR_ELx_WNR); + kasan_report((void *)addr, 0, is_write, regs->pc); } +#else +/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. 
*/ +static inline void report_tag_fault(unsigned long addr, unsigned long esr, + struct pt_regs *regs) { } +#endif + +static void do_tag_recovery(unsigned long addr, unsigned long esr, + struct pt_regs *regs) +{ + + report_tag_fault(addr, esr, regs); -static void __do_kernel_fault(unsigned long addr, unsigned int esr, + /* + * Disable MTE Tag Checking on the local CPU for the current EL. + * It will be done lazily on the other CPUs when they hit a + * tag fault. + */ + sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK, + SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE)); + isb(); +} + +static bool is_el1_mte_sync_tag_check_fault(unsigned long esr) +{ + unsigned long fsc = esr & ESR_ELx_FSC; + + if (!is_el1_data_abort(esr)) + return false; + + if (fsc == ESR_ELx_FSC_MTE) + return true; + + return false; +} + +static void __do_kernel_fault(unsigned long addr, unsigned long esr, struct pt_regs *regs) { const char *msg; @@ -297,13 +375,19 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, * Are we prepared to handle this kernel fault? * We are almost certainly not prepared to handle instruction faults. */ - if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) + if (!is_el1_instruction_abort(esr) && fixup_exception(regs, esr)) return; if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs), "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr)) return; + if (is_el1_mte_sync_tag_check_fault(esr)) { + do_tag_recovery(addr, esr, regs); + + return; + } + if (is_el1_permission_fault(addr, esr, regs)) { if (esr & ESR_ELx_WNR) msg = "write to read-only memory"; @@ -314,13 +398,20 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, * } else if (addr < PAGE_SIZE) { msg = "NULL pointer dereference"; } else { + if (esr_fsc_is_translation_fault(esr) && + kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) + return; + msg = "paging request"; } + if (efi_runtime_fixup_exception(regs, msg)) + return; + die_kernel_fault(msg, addr, esr, regs); } -static void set_thread_esr(unsigned long address, unsigned int esr) +static void set_thread_esr(unsigned long address, unsigned long esr) { current->thread.fault_address = address; @@ -368,7 +459,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr) * exception level). Fail safe by not providing an ESR * context record at all. */ - WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr); + WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr); esr = 0; break; } @@ -377,8 +468,11 @@ static void set_thread_esr(unsigned long address, unsigned int esr) current->thread.fault_code = esr; } -static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) +static void do_bad_area(unsigned long far, unsigned long esr, + struct pt_regs *regs) { + unsigned long addr = untagged_addr(far); + /* * If we are in kernel mode at this point, we have no context to * handle this fault with.
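do_tag_recovery() above turns tag checking off with sysreg_clear_set(), a read-modify-write that skips the register write when nothing changes. A minimal model of that helper on a plain variable (the TCF field position is used for illustration only):

#include <stdint.h>
#include <stdio.h>

static uint64_t sctlr;	/* stand-in for the real system register */

/* Same contract as the kernel's sysreg_clear_set() helper. */
static void reg_clear_set(uint64_t *reg, uint64_t clear, uint64_t set)
{
	uint64_t old = *reg;
	uint64_t new = (old & ~clear) | set;

	if (new != old)		/* skip the write if nothing changes */
		*reg = new;
}

int main(void)
{
	const uint64_t TCF_MASK = 3ULL << 40;	/* illustrative field position */

	sctlr = TCF_MASK;			/* pretend TCF = sync checks */
	reg_clear_set(&sctlr, TCF_MASK, 0);	/* TCF = NONE, as above */
	printf("sctlr = %#llx\n", (unsigned long long)sctlr);
	return 0;
}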
@@ -387,45 +481,38 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re const struct fault_info *inf = esr_to_fault_info(esr); set_thread_esr(addr, esr); - arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr, - inf->name); + arm64_force_sig_fault(inf->sig, inf->code, far, inf->name); } else { __do_kernel_fault(addr, esr, regs); } } -#define VM_FAULT_BADMAP 0x010000 -#define VM_FAULT_BADACCESS 0x020000 - -static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, - unsigned int mm_flags, unsigned long vm_flags) +static bool fault_from_pkey(unsigned long esr, struct vm_area_struct *vma, + unsigned int mm_flags) { - struct vm_area_struct *vma = find_vma(mm, addr); + unsigned long iss2 = ESR_ELx_ISS2(esr); - if (unlikely(!vma)) - return VM_FAULT_BADMAP; + if (!system_supports_poe()) + return false; - /* - * Ok, we have a good vm_area for this memory access, so we can handle - * it. - */ - if (unlikely(vma->vm_start > addr)) { - if (!(vma->vm_flags & VM_GROWSDOWN)) - return VM_FAULT_BADMAP; - if (expand_stack(vma, addr)) - return VM_FAULT_BADMAP; - } + if (esr_fsc_is_permission_fault(esr) && (iss2 & ESR_ELx_Overlay)) + return true; - /* - * Check that the permissions on the VMA allow for the fault which - * occurred. - */ - if (!(vma->vm_flags & vm_flags)) - return VM_FAULT_BADACCESS; - return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags); + return !arch_vma_access_permitted(vma, + mm_flags & FAULT_FLAG_WRITE, + mm_flags & FAULT_FLAG_INSTRUCTION, + false); +} + +static bool is_gcs_fault(unsigned long esr) +{ + if (!esr_is_data_abort(esr)) + return false; + + return ESR_ELx_ISS2(esr) & ESR_ELx_GCS; } -static bool is_el0_instruction_abort(unsigned int esr) +static bool is_el0_instruction_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; } @@ -434,19 +521,40 @@ static bool is_el0_instruction_abort(unsigned int esr) * Note: not valid for EL1 DC IVAC, but we never use that such that it * should fault. EL0 cannot issue DC IVAC (undef). 
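is_el1_data_abort() and friends above only look at the exception class in ESR_ELx[31:26]. A stand-alone sketch using the architected EC values for data and instruction aborts:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define ESR_EC_SHIFT	26
#define ESR_EC_MASK	(0x3fULL << ESR_EC_SHIFT)

#define EC_IABT_LOW	0x20	/* instruction abort from EL0 */
#define EC_IABT_CUR	0x21	/* instruction abort from EL1 */
#define EC_DABT_LOW	0x24	/* data abort from EL0 */
#define EC_DABT_CUR	0x25	/* data abort from EL1 */

static unsigned int esr_ec(uint64_t esr)
{
	return (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
}

static bool is_data_abort(uint64_t esr)
{
	unsigned int ec = esr_ec(esr);

	return ec == EC_DABT_LOW || ec == EC_DABT_CUR;
}

int main(void)
{
	uint64_t esr = (uint64_t)EC_DABT_LOW << ESR_EC_SHIFT;

	printf("data abort: %d\n", is_data_abort(esr));
	return 0;
}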
*/ -static bool is_write_abort(unsigned int esr) +static bool is_write_abort(unsigned long esr) { return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM); } -static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, +static bool is_invalid_gcs_access(struct vm_area_struct *vma, u64 esr) +{ + if (!system_supports_gcs()) + return false; + + if (unlikely(is_gcs_fault(esr))) { + /* GCS accesses must be performed on a GCS page */ + if (!(vma->vm_flags & VM_SHADOW_STACK)) + return true; + } else if (unlikely(vma->vm_flags & VM_SHADOW_STACK)) { + /* Only GCS operations can write to a GCS page */ + return esr_is_data_abort(esr) && is_write_abort(esr); + } + + return false; +} + +static int __kprobes do_page_fault(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; struct mm_struct *mm = current->mm; - vm_fault_t fault, major = 0; - unsigned long vm_flags = VM_READ | VM_WRITE; - unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + vm_fault_t fault; + unsigned long vm_flags; + unsigned int mm_flags = FAULT_FLAG_DEFAULT; + unsigned long addr = untagged_addr(far); + struct vm_area_struct *vma; + int si_code; + int pkey = -1; if (kprobe_page_fault(regs, esr)) return 0; @@ -461,107 +569,150 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, if (user_mode(regs)) mm_flags |= FAULT_FLAG_USER; + /* + * vm_flags tells us what bits we must have in vma->vm_flags + * for the fault to be benign, __do_page_fault() would check + * vma->vm_flags & vm_flags and returns an error if the + * intersection is empty + */ if (is_el0_instruction_abort(esr)) { + /* It was exec fault */ vm_flags = VM_EXEC; mm_flags |= FAULT_FLAG_INSTRUCTION; + } else if (is_gcs_fault(esr)) { + /* + * The GCS permission on a page implies both read and + * write so always handle any GCS fault as a write fault, + * we need to trigger CoW even for GCS reads. + */ + vm_flags = VM_WRITE; + mm_flags |= FAULT_FLAG_WRITE; } else if (is_write_abort(esr)) { + /* It was write fault */ vm_flags = VM_WRITE; mm_flags |= FAULT_FLAG_WRITE; + } else { + /* It was read fault */ + vm_flags = VM_READ; + /* Write implies read */ + vm_flags |= VM_WRITE; + /* If EPAN is absent then exec implies read */ + if (!alternative_has_cap_unlikely(ARM64_HAS_EPAN)) + vm_flags |= VM_EXEC; } if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) { - /* regs->orig_addr_limit may be 0 if we entered from EL0 */ - if (regs->orig_addr_limit == KERNEL_DS) - die_kernel_fault("access to user memory with fs=KERNEL_DS", - addr, esr, regs); - if (is_el1_instruction_abort(esr)) die_kernel_fault("execution of user memory", addr, esr, regs); - if (!search_exception_tables(regs->pc)) + if (!insn_may_access_user(regs->pc, esr)) die_kernel_fault("access to user memory outside uaccess routines", addr, esr, regs); } perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); - /* - * As per x86, we may deadlock here. However, since the kernel only - * validly references user space from well defined areas of the code, - * we can bug out early if this is from code which shouldn't. - */ - if (!down_read_trylock(&mm->mmap_sem)) { - if (!user_mode(regs) && !search_exception_tables(regs->pc)) - goto no_context; -retry: - down_read(&mm->mmap_sem); - } else { - /* - * The above down_read_trylock() might have succeeded in which - * case, we'll have missed the might_sleep() from down_read(). 
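The comment above describes how vm_flags is derived before the VMA check; restated as a small pure function (the VM_* bit values are placeholders, the branch logic mirrors do_page_fault()):

#include <stdbool.h>
#include <stdio.h>

#define VM_READ		0x1	/* placeholder values for illustration */
#define VM_WRITE	0x2
#define VM_EXEC		0x4

/* Mirrors the branches in do_page_fault() above. */
static unsigned long wanted_vm_flags(bool exec_fault, bool gcs_fault,
				     bool write_fault, bool have_epan)
{
	if (exec_fault)
		return VM_EXEC;
	if (gcs_fault || write_fault)
		return VM_WRITE;	/* GCS reads still need CoW */
	/* Read fault: write implies read; without EPAN, exec does too. */
	return VM_READ | VM_WRITE | (have_epan ? 0 : VM_EXEC);
}

int main(void)
{
	printf("%#lx\n", wanted_vm_flags(false, false, false, false));
	return 0;
}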
- */ - might_sleep(); -#ifdef CONFIG_DEBUG_VM - if (!user_mode(regs) && !search_exception_tables(regs->pc)) { - up_read(&mm->mmap_sem); + if (!(mm_flags & FAULT_FLAG_USER)) + goto lock_mmap; + + vma = lock_vma_under_rcu(mm, addr); + if (!vma) + goto lock_mmap; + + if (is_invalid_gcs_access(vma, esr)) { + vma_end_read(vma); + fault = 0; + si_code = SEGV_ACCERR; + goto bad_area; + } + + if (!(vma->vm_flags & vm_flags)) { + vma_end_read(vma); + fault = 0; + si_code = SEGV_ACCERR; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto bad_area; + } + + if (fault_from_pkey(esr, vma, mm_flags)) { + pkey = vma_pkey(vma); + vma_end_read(vma); + fault = 0; + si_code = SEGV_PKUERR; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto bad_area; + } + + fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs); + if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) + vma_end_read(vma); + + if (!(fault & VM_FAULT_RETRY)) { + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto done; + } + count_vm_vma_lock_event(VMA_LOCK_RETRY); + if (fault & VM_FAULT_MAJOR) + mm_flags |= FAULT_FLAG_TRIED; + + /* Quick path to respond to signals */ + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) goto no_context; - } -#endif + return 0; } +lock_mmap: - fault = __do_page_fault(mm, addr, mm_flags, vm_flags); - major |= fault & VM_FAULT_MAJOR; +retry: + vma = lock_mm_and_find_vma(mm, addr, regs); + if (unlikely(!vma)) { + fault = 0; + si_code = SEGV_MAPERR; + goto bad_area; + } - if (fault & VM_FAULT_RETRY) { - /* - * If we need to retry but a fatal signal is pending, - * handle the signal first. We do not need to release - * the mmap_sem because it would already be released - * in __lock_page_or_retry in mm/filemap.c. - */ - if (fatal_signal_pending(current)) { - if (!user_mode(regs)) - goto no_context; - return 0; - } + if (!(vma->vm_flags & vm_flags)) { + mmap_read_unlock(mm); + fault = 0; + si_code = SEGV_ACCERR; + goto bad_area; + } - /* - * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of - * starvation. - */ - if (mm_flags & FAULT_FLAG_ALLOW_RETRY) { - mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; - mm_flags |= FAULT_FLAG_TRIED; - goto retry; - } + if (fault_from_pkey(esr, vma, mm_flags)) { + pkey = vma_pkey(vma); + mmap_read_unlock(mm); + fault = 0; + si_code = SEGV_PKUERR; + goto bad_area; } - up_read(&mm->mmap_sem); - /* - * Handle the "normal" (no error) case first. - */ - if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | - VM_FAULT_BADACCESS)))) { - /* - * Major/minor page fault accounting is only done - * once. If we go through a retry, it is extremely - * likely that the page will be found in page cache at - * that point. - */ - if (major) { - current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, - addr); - } else { - current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, - addr); - } + fault = handle_mm_fault(vma, addr, mm_flags, regs); + + /* Quick path to respond to signals */ + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + goto no_context; + return 0; + } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) return 0; + + if (fault & VM_FAULT_RETRY) { + mm_flags |= FAULT_FLAG_TRIED; + goto retry; } + mmap_read_unlock(mm); + +done: + /* Handle the "normal" (no error) case first. */ + if (likely(!(fault & VM_FAULT_ERROR))) + return 0; + si_code = SEGV_MAPERR; +bad_area: /* * If we are in kernel mode at this point, we have no context to * handle this fault with. 
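The rewritten path above tries the per-VMA read lock first and only falls back to mmap_lock when the fast path asks for a retry. A schematic of that control flow, with hypothetical stubs standing in for the MM API (this is a shape sketch, not the kernel's locking rules):

#include <stdbool.h>
#include <stddef.h>

struct vma { int dummy; };

/* Hypothetical stand-ins for the MM API used above. */
static struct vma *try_lock_vma_rcu(unsigned long addr) { (void)addr; return NULL; }
static void vma_end_read(struct vma *v) { (void)v; }
static struct vma *lock_mm_and_find_vma(unsigned long addr) { (void)addr; return NULL; }
static void mmap_read_unlock(void) { }
static unsigned int fault_one(struct vma *v, bool vma_locked) { (void)v; (void)vma_locked; return 0; }

#define FAULT_RETRY	0x1
#define FAULT_COMPLETED	0x2

static int handle_fault(unsigned long addr)
{
	struct vma *vma = try_lock_vma_rcu(addr);
	unsigned int ret;

	if (vma) {
		ret = fault_one(vma, true);
		if (!(ret & (FAULT_RETRY | FAULT_COMPLETED)))
			vma_end_read(vma);	/* retry/completed drop it themselves */
		if (!(ret & FAULT_RETRY))
			return 0;		/* fast path done */
	}
	/* Slow path: take mmap_lock and retry under it. */
	do {
		vma = lock_mm_and_find_vma(addr);
		if (!vma)
			return -1;		/* SEGV_MAPERR */
		ret = fault_one(vma, false);
	} while (ret & FAULT_RETRY);
	if (!(ret & FAULT_COMPLETED))
		mmap_read_unlock();
	return 0;
}

int main(void) { return handle_fault(0); }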
@@ -586,8 +737,7 @@ retry: * We had some memory, but were unable to successfully fix up * this page fault. */ - arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr, - inf->name); + arm64_force_sig_fault(SIGBUS, BUS_ADRERR, far, inf->name); } else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) { unsigned int lsb; @@ -595,17 +745,25 @@ retry: if (fault & VM_FAULT_HWPOISON_LARGE) lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); - arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb, - inf->name); + arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name); } else { /* - * Something tried to access memory that isn't in our memory - * map. + * The pkey value that we return to userspace can be different + * from the pkey that caused the fault. + * + * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); + * 2. T1 : set POR_EL0 to deny access to pkey=4, touches page + * 3. T1 : faults... + * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); + * 5. T1 : enters fault handler, takes mmap_lock, etc... + * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really + * faulted on a pte with its pkey=4. */ - arm64_force_sig_fault(SIGSEGV, - fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR, - (void __user *)addr, - inf->name); + /* Something tried to access memory that is not in our memory map */ + if (si_code == SEGV_PKUERR) + arm64_force_sig_fault_pkey(far, inf->name, pkey); + else + arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name); } return 0; @@ -615,51 +773,77 @@ no_context: return 0; } -static int __kprobes do_translation_fault(unsigned long addr, - unsigned int esr, +static int __kprobes do_translation_fault(unsigned long far, + unsigned long esr, struct pt_regs *regs) { + unsigned long addr = untagged_addr(far); + if (is_ttbr0_addr(addr)) - return do_page_fault(addr, esr, regs); + return do_page_fault(far, esr, regs); - do_bad_area(addr, esr, regs); + do_bad_area(far, esr, regs); return 0; } -static int do_alignment_fault(unsigned long addr, unsigned int esr, +static int do_alignment_fault(unsigned long far, unsigned long esr, struct pt_regs *regs) { - do_bad_area(addr, esr, regs); + if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) && + compat_user_mode(regs)) + return do_compat_alignment_fixup(far, regs); + do_bad_area(far, esr, regs); return 0; } -static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) +static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs) { return 1; /* "fault" */ } -static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) +static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; - void __user *siaddr; + unsigned long siaddr; inf = esr_to_fault_info(esr); - /* - * Return value ignored as we rely on signal merging. - * Future patches will make this more robust. - */ - apei_claim_sea(regs); + if (user_mode(regs) && apei_claim_sea(regs) == 0) { + /* + * APEI claimed this as a firmware-first notification. + * Some processing deferred to task_work before ret_to_user(). + */ + return 0; + } - if (esr & ESR_ELx_FnV) - siaddr = NULL; - else - siaddr = (void __user *)addr; + if (esr & ESR_ELx_FnV) { + siaddr = 0; + } else { + /* + * The architecture specifies that the tag bits of FAR_EL1 are + * UNKNOWN for synchronous external aborts. Mask them out now + * so that userspace doesn't see them.
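Both of these paths, and do_tag_check_fault() just below, juggle the tag bits of FAR_EL1. A sketch of the address fixups involved, assuming the usual arm64 layout (logical tag in bits 63:56, MTE tag in bits 59:56, untagging modelled as sign-extension from bit 55):

#include <stdint.h>
#include <stdio.h>

/* untagged_addr(): sign-extend from bit 55, clearing the tag byte. */
static uint64_t untag(uint64_t addr)
{
	return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}

#define MTE_TAG_MASK	(0xfULL << 56)	/* logical MTE tag, bits 59:56 */

int main(void)
{
	uint64_t far = 0x3400ffff12345678ULL;	/* tagged user address */

	/* SEA path: drop all tag bits before reporting to userspace. */
	printf("sea siaddr = %#llx\n", (unsigned long long)untag(far));

	/* Tag-check path: keep the MTE tag, untag everything else. */
	uint64_t fixed = (untag(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
	printf("mte siaddr = %#llx\n", (unsigned long long)fixed);
	return 0;
}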
+ */ + siaddr = untagged_addr(far); + } arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); return 0; } +static int do_tag_check_fault(unsigned long far, unsigned long esr, + struct pt_regs *regs) +{ + /* + * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN + * for tag check faults. Set them to corresponding bits in the untagged + * address. + */ + far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK); + do_bad_area(far, esr, regs); + return 0; +} + static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "ttbr address size fault" }, { do_bad, SIGKILL, SI_KERNEL, "level 1 address size fault" }, @@ -669,18 +853,18 @@ static const struct fault_info fault_info[] = { { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 8" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 0 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 12" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 0 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, { do_sea, SIGBUS, BUS_OBJERR, "synchronous external abort" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 17" }, + { do_tag_check_fault, SIGSEGV, SEGV_MTESERR, "synchronous tag check fault" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 18" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 19" }, + { do_sea, SIGKILL, SI_KERNEL, "level -1 (translation table walk)" }, { do_sea, SIGKILL, SI_KERNEL, "level 0 (translation table walk)" }, { do_sea, SIGKILL, SI_KERNEL, "level 1 (translation table walk)" }, { do_sea, SIGKILL, SI_KERNEL, "level 2 (translation table walk)" }, @@ -688,7 +872,7 @@ static const struct fault_info fault_info[] = { { do_sea, SIGBUS, BUS_OBJERR, "synchronous parity or ECC error" }, // Reserved when RAS is implemented { do_bad, SIGKILL, SI_KERNEL, "unknown 25" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 26" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 27" }, + { do_sea, SIGKILL, SI_KERNEL, "level -1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented { do_sea, SIGKILL, SI_KERNEL, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented { do_sea, SIGKILL, SI_KERNEL, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented { do_sea, SIGKILL, SI_KERNEL, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented @@ -702,9 +886,9 @@ static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "unknown 38" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 39" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 40" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 41" }, + { do_bad, SIGKILL, SI_KERNEL, "level -1 address size fault" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 42" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 43" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level -1 translation fault" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 44" }, 
{ do_bad, SIGKILL, SI_KERNEL, "unknown 45" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 46" }, @@ -727,41 +911,33 @@ static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, }; -void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) +void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_fault_info(esr); + unsigned long addr = untagged_addr(far); - if (!inf->fn(addr, esr, regs)) + if (!inf->fn(far, esr, regs)) return; - if (!user_mode(regs)) { - pr_alert("Unhandled fault at 0x%016lx\n", addr); - mem_abort_decode(esr); - show_pte(addr); - } + if (!user_mode(regs)) + die_kernel_fault(inf->name, addr, esr, regs); - arm64_notify_die(inf->name, regs, - inf->sig, inf->code, (void __user *)addr, esr); + /* + * At this point we have an unrecognized fault type whose tag bits may + * have been defined as UNKNOWN. Therefore we only expose the untagged + * address to the signal handler. + */ + arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr); } NOKPROBE_SYMBOL(do_mem_abort); -void do_el0_irq_bp_hardening(void) +void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs) { - /* PC has already been checked in entry.S */ - arm64_apply_bp_hardening(); -} -NOKPROBE_SYMBOL(do_el0_irq_bp_hardening); - -void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) -{ - arm64_notify_die("SP/PC alignment exception", regs, - SIGBUS, BUS_ADRALN, (void __user *)addr, esr); + arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, + addr, esr); } NOKPROBE_SYMBOL(do_sp_pc_abort); -int __init early_brk64(unsigned long addr, unsigned int esr, - struct pt_regs *regs); - /* * __refdata because early_brk64 is __init, but the reference to it is * clobbered at arch_initcall time. @@ -779,7 +955,7 @@ static struct fault_info __refdata debug_fault_info[] = { }; void __init hook_debug_fault_code(int nr, - int (*fn)(unsigned long, unsigned int, struct pt_regs *), + int (*fn)(unsigned long, unsigned long, struct pt_regs *), int sig, int code, const char *name) { BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info)); @@ -799,25 +975,6 @@ void __init hook_debug_fault_code(int nr, */ static void debug_exception_enter(struct pt_regs *regs) { - /* - * Tell lockdep we disabled irqs in entry.S. Do nothing if they were - * already disabled to preserve the last enabled/disabled addresses. - */ - if (interrupts_enabled(regs)) - trace_hardirqs_off(); - - if (user_mode(regs)) { - RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); - } else { - /* - * We might have interrupted pretty much anything. In - * fact, if we're a debug exception, we can even interrupt - * NMI processing. We don't want this code makes in_nmi() - * to return true, but we need to notify RCU. - */ - rcu_nmi_enter(); - } - preempt_disable(); /* This code is a bit fragile. Test it. 
*/ @@ -828,63 +985,51 @@ NOKPROBE_SYMBOL(debug_exception_enter); static void debug_exception_exit(struct pt_regs *regs) { preempt_enable_no_resched(); - - if (!user_mode(regs)) - rcu_nmi_exit(); - - if (interrupts_enabled(regs)) - trace_hardirqs_on(); } NOKPROBE_SYMBOL(debug_exception_exit); -#ifdef CONFIG_ARM64_ERRATUM_1463225 -DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); - -static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) -{ - if (user_mode(regs)) - return 0; - - if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa)) - return 0; - - /* - * We've taken a dummy step exception from the kernel to ensure - * that interrupts are re-enabled on the syscall path. Return back - * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions - * masked so that we can safely restore the mdscr and get on with - * handling the syscall. - */ - regs->pstate |= PSR_D_BIT; - return 1; -} -#else -static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) -{ - return 0; -} -#endif /* CONFIG_ARM64_ERRATUM_1463225 */ -NOKPROBE_SYMBOL(cortex_a76_erratum_1463225_debug_handler); - -void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, +void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_debug_fault_info(esr); unsigned long pc = instruction_pointer(regs); - if (cortex_a76_erratum_1463225_debug_handler(regs)) - return; - debug_exception_enter(regs); if (user_mode(regs) && !is_ttbr0_addr(pc)) arm64_apply_bp_hardening(); if (inf->fn(addr_if_watchpoint, esr, regs)) { - arm64_notify_die(inf->name, regs, - inf->sig, inf->code, (void __user *)pc, esr); + arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr); } debug_exception_exit(regs); } NOKPROBE_SYMBOL(do_debug_exception); + +/* + * Used during anonymous page fault handling. + */ +struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, + unsigned long vaddr) +{ + gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO; + + /* + * If the page is mapped with PROT_MTE, initialise the tags at the + * point of allocation and page zeroing as this is usually faster than + * separate DC ZVA and STGM. 
+ */ + if (vma->vm_flags & VM_MTE) + flags |= __GFP_ZEROTAGS; + + return vma_alloc_folio(flags, 0, vma, vaddr); +} + +void tag_clear_highpage(struct page *page) +{ + /* Newly allocated page, shouldn't have been tagged yet */ + WARN_ON_ONCE(!try_page_mte_tagging(page)); + mte_zero_clear_page_tags(page_address(page)); + set_page_mte_tagged(page); +} diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c new file mode 100644 index 000000000000..c5c5425791da --- /dev/null +++ b/arch/arm64/mm/fixmap.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Fixmap manipulation code + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/libfdt.h> +#include <linux/memory.h> +#include <linux/mm.h> +#include <linux/sizes.h> + +#include <asm/fixmap.h> +#include <asm/kernel-pgtable.h> +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> + +/* ensure that the fixmap region does not grow down into the PCI I/O region */ +static_assert(FIXADDR_TOT_START > PCI_IO_END); + +#define NR_BM_PTE_TABLES \ + SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT) +#define NR_BM_PMD_TABLES \ + SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT) + +static_assert(NR_BM_PMD_TABLES == 1); + +#define __BM_TABLE_IDX(addr, shift) \ + (((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift))) + +#define BM_PTE_TABLE_IDX(addr) __BM_TABLE_IDX(addr, PMD_SHIFT) + +static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss; +static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused; +static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused; + +static inline pte_t *fixmap_pte(unsigned long addr) +{ + return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)]; +} + +static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr) +{ + pmd_t pmd = READ_ONCE(*pmdp); + pte_t *ptep; + + if (pmd_none(pmd)) { + ptep = bm_pte[BM_PTE_TABLE_IDX(addr)]; + __pmd_populate(pmdp, __pa_symbol(ptep), + PMD_TYPE_TABLE | PMD_TABLE_AF); + } +} + +static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr, + unsigned long end) +{ + unsigned long next; + pud_t pud = READ_ONCE(*pudp); + pmd_t *pmdp; + + if (pud_none(pud)) + __pud_populate(pudp, __pa_symbol(bm_pmd), + PUD_TYPE_TABLE | PUD_TABLE_AF); + + pmdp = pmd_offset_kimg(pudp, addr); + do { + next = pmd_addr_end(addr, end); + early_fixmap_init_pte(pmdp, addr); + } while (pmdp++, addr = next, addr != end); +} + + +static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr, + unsigned long end) +{ + p4d_t p4d = READ_ONCE(*p4dp); + pud_t *pudp; + + if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) && + p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) { + /* + * We only end up here if the kernel mapping and the fixmap + * share the top level pgd entry, which should only happen on + * 16k/4 levels configurations. + */ + BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); + } + + if (p4d_none(p4d)) + __p4d_populate(p4dp, __pa_symbol(bm_pud), + P4D_TYPE_TABLE | P4D_TABLE_AF); + + pudp = pud_offset_kimg(p4dp, addr); + early_fixmap_init_pmd(pudp, addr, end); +} + +/* + * The p*d_populate functions call virt_to_phys implicitly so they can't be used + * directly on kernel symbols (bm_p*d). This function is called too early to use + * lm_alias so __p*d_populate functions must be used to populate with the + * physical address from __pa_symbol. 
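BM_PTE_TABLE_IDX() above picks which statically allocated PTE table covers a fixmap address by comparing PMD-granule indices. A toy version (the FIXADDR_TOT_START value here is a made-up stand-in for the real layout):

#include <stdio.h>

#define PMD_SHIFT	21	/* 4K granule */
#define FIXADDR_TOT_START 0xfffffbfffe800000UL	/* hypothetical layout */

/* Which of the statically allocated PTE tables covers addr? */
#define BM_PTE_TABLE_IDX(addr) \
	(((addr) >> PMD_SHIFT) - (FIXADDR_TOT_START >> PMD_SHIFT))

int main(void)
{
	unsigned long addr = FIXADDR_TOT_START + (3UL << PMD_SHIFT) + 0x1234;

	printf("table %lu\n", BM_PTE_TABLE_IDX(addr));
	return 0;
}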
+ */ +void __init early_fixmap_init(void) +{ + unsigned long addr = FIXADDR_TOT_START; + unsigned long end = FIXADDR_TOP; + + pgd_t *pgdp = pgd_offset_k(addr); + p4d_t *p4dp = p4d_offset_kimg(pgdp, addr); + + early_fixmap_init_pud(p4dp, addr, end); +} + +/* + * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we + * ever need to use IPIs for TLB broadcasting, then we're in trouble here. + */ +void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags) +{ + unsigned long addr = __fix_to_virt(idx); + pte_t *ptep; + + BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); + + ptep = fixmap_pte(addr); + + if (pgprot_val(flags)) { + __set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); + } else { + __pte_clear(&init_mm, addr, ptep); + flush_tlb_kernel_range(addr, addr+PAGE_SIZE); + } +} + +void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) +{ + const u64 dt_virt_base = __fix_to_virt(FIX_FDT); + phys_addr_t dt_phys_base; + int offset; + void *dt_virt; + + /* + * Check whether the physical FDT address is set and meets the minimum + * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be + * at least 8 bytes so that we can always access the magic and size + * fields of the FDT header after mapping the first chunk, double check + * here if that is indeed the case. + */ + BUILD_BUG_ON(MIN_FDT_ALIGN < 8); + if (!dt_phys || dt_phys % MIN_FDT_ALIGN) + return NULL; + + dt_phys_base = round_down(dt_phys, PAGE_SIZE); + offset = dt_phys % PAGE_SIZE; + dt_virt = (void *)dt_virt_base + offset; + + /* map the first chunk so we can read the size from the header */ + create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot); + + if (fdt_magic(dt_virt) != FDT_MAGIC) + return NULL; + + *size = fdt_totalsize(dt_virt); + if (*size > MAX_FDT_SIZE) + return NULL; + + if (offset + *size > PAGE_SIZE) { + create_mapping_noalloc(dt_phys_base, dt_virt_base, + offset + *size, prot); + } + + return dt_virt; +} diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index ac485163a4a7..013eead9b695 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -8,34 +8,32 @@ #include <linux/export.h> #include <linux/mm.h> +#include <linux/libnvdimm.h> #include <linux/pagemap.h> #include <asm/cacheflush.h> #include <asm/cache.h> #include <asm/tlbflush.h> -void sync_icache_aliases(void *kaddr, unsigned long len) +void sync_icache_aliases(unsigned long start, unsigned long end) { - unsigned long addr = (unsigned long)kaddr; - if (icache_is_aliasing()) { - __clean_dcache_area_pou(kaddr, len); - __flush_icache_all(); + dcache_clean_pou(start, end); + icache_inval_all_pou(); } else { /* * Don't issue kick_all_cpus_sync() after I-cache invalidation * for user mappings. 
*/ - __flush_icache_range(addr, addr + len); + caches_clean_inval_pou(start, end); } } -static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len) +static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start, + unsigned long end) { if (vma->vm_flags & VM_EXEC) - sync_icache_aliases(kaddr, len); + sync_icache_aliases(start, end); } /* @@ -48,15 +46,19 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long len) { memcpy(dst, src, len); - flush_ptrace_access(vma, page, uaddr, dst, len); + flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len); } void __sync_icache_dcache(pte_t pte) { - struct page *page = pte_page(pte); + struct folio *folio = page_folio(pte_page(pte)); - if (!test_and_set_bit(PG_dcache_clean, &page->flags)) - sync_icache_aliases(page_address(page), page_size(page)); + if (!test_bit(PG_dcache_clean, &folio->flags)) { + sync_icache_aliases((unsigned long)folio_address(folio), + (unsigned long)folio_address(folio) + + folio_size(folio)); + set_bit(PG_dcache_clean, &folio->flags); + } } EXPORT_SYMBOL_GPL(__sync_icache_dcache); @@ -65,30 +67,36 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache); * it as dirty for later flushing when mapped in user space (if executable, * see __sync_icache_dcache). */ +void flush_dcache_folio(struct folio *folio) +{ + if (test_bit(PG_dcache_clean, &folio->flags)) + clear_bit(PG_dcache_clean, &folio->flags); +} +EXPORT_SYMBOL(flush_dcache_folio); + void flush_dcache_page(struct page *page) { - if (test_bit(PG_dcache_clean, &page->flags)) - clear_bit(PG_dcache_clean, &page->flags); + flush_dcache_folio(page_folio(page)); } EXPORT_SYMBOL(flush_dcache_page); /* * Additional functions defined in assembly. 
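flush_dcache_page() and __sync_icache_dcache() above implement lazy I/D coherency around the PG_dcache_clean bit: writers clear the flag, and the first executable mapping afterwards pays for the sync. A user-space model of that handshake:

#include <stdbool.h>
#include <stdio.h>

struct folio { bool dcache_clean; };

static int syncs;

static void on_dirty(struct folio *f)		/* flush_dcache_folio() */
{
	f->dcache_clean = false;
}

static void on_exec_map(struct folio *f)	/* __sync_icache_dcache() */
{
	if (!f->dcache_clean) {
		syncs++;			/* sync_icache_aliases(...) */
		f->dcache_clean = true;
	}
}

int main(void)
{
	struct folio f = { false };

	on_exec_map(&f);
	on_exec_map(&f);	/* already clean: no second sync */
	on_dirty(&f);
	on_exec_map(&f);	/* dirtied, so sync again */
	printf("syncs=%d\n", syncs);
	return 0;
}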
*/ -EXPORT_SYMBOL(__flush_icache_range); +EXPORT_SYMBOL(caches_clean_inval_pou); #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size) { /* Ensure order against any prior non-cacheable writes */ dmb(osh); - __clean_dcache_area_pop(addr, size); + dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size); } EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); void arch_invalidate_pmem(void *addr, size_t size) { - __inval_dcache_area(addr, size); + dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size); } EXPORT_SYMBOL_GPL(arch_invalidate_pmem); #endif diff --git a/arch/arm64/mm/gcs.c b/arch/arm64/mm/gcs.c new file mode 100644 index 000000000000..5c46ec527b1c --- /dev/null +++ b/arch/arm64/mm/gcs.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/syscalls.h> +#include <linux/types.h> + +#include <asm/cmpxchg.h> +#include <asm/cpufeature.h> +#include <asm/gcs.h> +#include <asm/page.h> + +static unsigned long alloc_gcs(unsigned long addr, unsigned long size) +{ + int flags = MAP_ANONYMOUS | MAP_PRIVATE; + struct mm_struct *mm = current->mm; + unsigned long mapped_addr, unused; + + if (addr) + flags |= MAP_FIXED_NOREPLACE; + + mmap_write_lock(mm); + mapped_addr = do_mmap(NULL, addr, size, PROT_READ, flags, + VM_SHADOW_STACK | VM_WRITE, 0, &unused, NULL); + mmap_write_unlock(mm); + + return mapped_addr; +} + +static unsigned long gcs_size(unsigned long size) +{ + if (size) + return PAGE_ALIGN(size); + + /* Allocate RLIMIT_STACK/2 with limits of PAGE_SIZE..2G */ + size = PAGE_ALIGN(min_t(unsigned long long, + rlimit(RLIMIT_STACK) / 2, SZ_2G)); + return max(PAGE_SIZE, size); +} + +unsigned long gcs_alloc_thread_stack(struct task_struct *tsk, + const struct kernel_clone_args *args) +{ + unsigned long addr, size; + + if (!system_supports_gcs()) + return 0; + + if (!task_gcs_el0_enabled(tsk)) + return 0; + + if ((args->flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM) { + tsk->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0); + return 0; + } + + size = args->stack_size / 2; + + size = gcs_size(size); + addr = alloc_gcs(0, size); + if (IS_ERR_VALUE(addr)) + return addr; + + tsk->thread.gcs_base = addr; + tsk->thread.gcs_size = size; + tsk->thread.gcspr_el0 = addr + size - sizeof(u64); + + return addr; +} + +SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags) +{ + unsigned long alloc_size; + unsigned long __user *cap_ptr; + unsigned long cap_val; + int ret = 0; + int cap_offset; + + if (!system_supports_gcs()) + return -EOPNOTSUPP; + + if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER)) + return -EINVAL; + + if (!PAGE_ALIGNED(addr)) + return -EINVAL; + + if (size == 8 || !IS_ALIGNED(size, 8)) + return -EINVAL; + + /* + * An overflow would result in attempting to write the restore token + * to the wrong location. Not catastrophic, but just return the right + * error code and block it. + */ + alloc_size = PAGE_ALIGN(size); + if (alloc_size < size) + return -EOVERFLOW; + + addr = alloc_gcs(addr, alloc_size); + if (IS_ERR_VALUE(addr)) + return addr; + + /* + * Put a cap token at the end of the allocated region so it + * can be switched to. + */ + if (flags & SHADOW_STACK_SET_TOKEN) { + /* Leave an extra empty frame as a top of stack marker? 
*/ + if (flags & SHADOW_STACK_SET_MARKER) + cap_offset = 2; + else + cap_offset = 1; + + cap_ptr = (unsigned long __user *)(addr + size - + (cap_offset * sizeof(unsigned long))); + cap_val = GCS_CAP(cap_ptr); + + put_user_gcs(cap_val, cap_ptr, &ret); + if (ret != 0) { + vm_munmap(addr, size); + return -EFAULT; + } + + /* + * Ensure the new cap is ordered before standard + * memory accesses to the same location. + */ + gcsb_dsync(); + } + + return addr; +} + +/* + * Apply the GCS mode configured for the specified task to the + * hardware. + */ +void gcs_set_el0_mode(struct task_struct *task) +{ + u64 gcscre0_el1 = GCSCRE0_EL1_nTR; + + if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE) + gcscre0_el1 |= GCSCRE0_EL1_RVCHKEN | GCSCRE0_EL1_PCRSEL; + + if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_WRITE) + gcscre0_el1 |= GCSCRE0_EL1_STREn; + + if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_PUSH) + gcscre0_el1 |= GCSCRE0_EL1_PUSHMEn; + + write_sysreg_s(gcscre0_el1, SYS_GCSCRE0_EL1); +} + +void gcs_free(struct task_struct *task) +{ + if (!system_supports_gcs()) + return; + + /* + * When fork() with CLONE_VM fails, the child (tsk) already + * has a GCS allocated, and exit_thread() calls this function + * to free it. In this case the parent (current) and the + * child share the same mm struct. + */ + if (!task->mm || task->mm != current->mm) + return; + + if (task->thread.gcs_base) + vm_munmap(task->thread.gcs_base, task->thread.gcs_size); + + task->thread.gcspr_el0 = 0; + task->thread.gcs_base = 0; + task->thread.gcs_size = 0; +} + +int arch_set_shadow_stack_status(struct task_struct *task, unsigned long arg) +{ + unsigned long gcs, size; + int ret; + + if (!system_supports_gcs()) + return -EINVAL; + + if (is_compat_thread(task_thread_info(task))) + return -EINVAL; + + /* Reject unknown flags */ + if (arg & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK) + return -EINVAL; + + ret = gcs_check_locked(task, arg); + if (ret != 0) + return ret; + + /* If we are enabling GCS then make sure we have a stack */ + if (arg & PR_SHADOW_STACK_ENABLE && + !task_gcs_el0_enabled(task)) { + /* Do not allow GCS to be reenabled */ + if (task->thread.gcs_base || task->thread.gcspr_el0) + return -EINVAL; + + if (task != current) + return -EBUSY; + + size = gcs_size(0); + gcs = alloc_gcs(0, size); + if (!gcs) + return -ENOMEM; + + task->thread.gcspr_el0 = gcs + size - sizeof(u64); + task->thread.gcs_base = gcs; + task->thread.gcs_size = size; + if (task == current) + write_sysreg_s(task->thread.gcspr_el0, + SYS_GCSPR_EL0); + } + + task->thread.gcs_el0_mode = arg; + if (task == current) + gcs_set_el0_mode(task); + + return 0; +} + +int arch_get_shadow_stack_status(struct task_struct *task, + unsigned long __user *arg) +{ + if (!system_supports_gcs()) + return -EINVAL; + + if (is_compat_thread(task_thread_info(task))) + return -EINVAL; + + return put_user(task->thread.gcs_el0_mode, arg); +} + +int arch_lock_shadow_stack_status(struct task_struct *task, + unsigned long arg) +{ + if (!system_supports_gcs()) + return -EINVAL; + + if (is_compat_thread(task_thread_info(task))) + return -EINVAL; + + /* + * We support locking unknown bits so applications can prevent + * any changes in a future proof manner. 
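gcs_size() and the cap placement in map_shadow_stack() above reduce to a little arithmetic: the default size is RLIMIT_STACK/2 clamped to PAGE_SIZE..2G, and the cap token sits one slot below the end, or two when a marker is requested. A sketch with a 4K PAGE_SIZE and a made-up mapping base:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define SZ_2G		(2UL * 1024 * 1024 * 1024)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

/* Default GCS size: RLIMIT_STACK/2, clamped to PAGE_SIZE..2G. */
static unsigned long gcs_default_size(unsigned long rlimit_stack)
{
	unsigned long size = PAGE_ALIGN(min_ul(rlimit_stack / 2, SZ_2G));

	return max_ul(PAGE_SIZE, size);
}

int main(void)
{
	unsigned long size = gcs_default_size(8UL * 1024 * 1024);
	unsigned long addr = 0x7f0000000000UL;	/* pretend mapping base */
	int with_marker = 1;

	/* Cap goes one slot from the end, two if a marker is requested. */
	unsigned long cap_ptr = addr + size -
			(with_marker ? 2 : 1) * sizeof(uint64_t);

	printf("size=%#lx cap at %#lx\n", size, cap_ptr);
	return 0;
}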
+ */ + task->thread.gcs_el0_locked |= arg; + + return 0; +} diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index bbeb6a5a6ba6..0c8737f4f2ce 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -17,61 +17,79 @@ #include <asm/mman.h> #include <asm/tlb.h> #include <asm/tlbflush.h> -#include <asm/pgalloc.h> -#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION -bool arch_hugetlb_migration_supported(struct hstate *h) +/* + * HugeTLB Support Matrix + * + * --------------------------------------------------- + * | Page Size | CONT PTE | PMD | CONT PMD | PUD | + * --------------------------------------------------- + * | 4K | 64K | 2M | 32M | 1G | + * | 16K | 2M | 32M | 1G | | + * | 64K | 2M | 512M | 16G | | + * --------------------------------------------------- + */ + +/* + * Reserve CMA areas for the largest supported gigantic + * huge page when requested. Any other smaller gigantic + * huge pages could still be served from those areas. + */ +#ifdef CONFIG_CMA +void __init arm64_hugetlb_cma_reserve(void) { - size_t pagesize = huge_page_size(h); + int order; - switch (pagesize) { -#ifdef CONFIG_ARM64_4K_PAGES + if (pud_sect_supported()) + order = PUD_SHIFT - PAGE_SHIFT; + else + order = CONT_PMD_SHIFT - PAGE_SHIFT; + + hugetlb_cma_reserve(order); +} +#endif /* CONFIG_CMA */ + +static bool __hugetlb_valid_size(unsigned long size) +{ + switch (size) { +#ifndef __PAGETABLE_PMD_FOLDED case PUD_SIZE: + return pud_sect_supported(); #endif - case PMD_SIZE: case CONT_PMD_SIZE: + case PMD_SIZE: case CONT_PTE_SIZE: return true; } - pr_warn("%s: unrecognized huge page size 0x%lx\n", - __func__, pagesize); - return false; -} -#endif - -int pmd_huge(pmd_t pmd) -{ - return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); -} -int pud_huge(pud_t pud) -{ -#ifndef __PAGETABLE_PMD_FOLDED - return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT); -#else - return 0; -#endif + return false; } -/* - * Select all bits except the pfn - */ -static inline pgprot_t pte_pgprot(pte_t pte) +#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION +bool arch_hugetlb_migration_supported(struct hstate *h) { - unsigned long pfn = pte_pfn(pte); + size_t pagesize = huge_page_size(h); - return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); + if (!__hugetlb_valid_size(pagesize)) { + pr_warn("%s: unrecognized huge page size 0x%lx\n", + __func__, pagesize); + return false; + } + return true; } +#endif static int find_num_contig(struct mm_struct *mm, unsigned long addr, pte_t *ptep, size_t *pgsize) { pgd_t *pgdp = pgd_offset(mm, addr); + p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; *pgsize = PAGE_SIZE; - pudp = pud_offset(pgdp, addr); + p4dp = p4d_offset(pgdp, addr); + pudp = pud_offset(p4dp, addr); pmdp = pmd_offset(pudp, addr); if ((pte_t *)pmdp == ptep) { *pgsize = PMD_SIZE; @@ -82,17 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr, static inline int num_contig_ptes(unsigned long size, size_t *pgsize) { - int contig_ptes = 0; + int contig_ptes = 1; *pgsize = size; switch (size) { -#ifdef CONFIG_ARM64_4K_PAGES - case PUD_SIZE: -#endif - case PMD_SIZE: - contig_ptes = 1; - break; case CONT_PMD_SIZE: *pgsize = PMD_SIZE; contig_ptes = CONT_PMDS; @@ -101,11 +113,35 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize) *pgsize = PAGE_SIZE; contig_ptes = CONT_PTES; break; + default: + WARN_ON(!__hugetlb_valid_size(size)); } return contig_ptes; } +pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + int ncontig, i; + size_t 
pgsize; + pte_t orig_pte = __ptep_get(ptep); + + if (!pte_present(orig_pte) || !pte_cont(orig_pte)) + return orig_pte; + + ncontig = find_num_contig(mm, addr, ptep, &pgsize); + for (i = 0; i < ncontig; i++, ptep++) { + pte_t pte = __ptep_get(ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + return orig_pte; +} + /* * Changing some bits of contiguous entries requires us to follow a * Break-Before-Make approach, breaking the whole contiguous set @@ -114,35 +150,41 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize) * * This helper performs the break step. */ -static pte_t get_clear_flush(struct mm_struct *mm, +static pte_t get_clear_contig(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long pgsize, unsigned long ncontig) { - pte_t orig_pte = huge_ptep_get(ptep); - bool valid = pte_valid(orig_pte); - unsigned long i, saddr = addr; - - for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { - pte_t pte = ptep_get_and_clear(mm, addr, ptep); - - /* - * If HW_AFDBM is enabled, then the HW could turn on - * the dirty or accessed bit for any page in the set, - * so check them all. - */ - if (pte_dirty(pte)) - orig_pte = pte_mkdirty(orig_pte); - - if (pte_young(pte)) - orig_pte = pte_mkyoung(orig_pte); + pte_t pte, tmp_pte; + bool present; + + pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize); + present = pte_present(pte); + while (--ncontig) { + ptep++; + tmp_pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize); + if (present) { + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } } + return pte; +} - if (valid) { - struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); - flush_tlb_range(&vma, saddr, addr); - } +static pte_t get_clear_contig_flush(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, + unsigned long pgsize, + unsigned long ncontig) +{ + pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig); + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); + unsigned long end = addr + (pgsize * ncontig); + + __flush_hugetlb_tlb_range(&vma, addr, end, pgsize, true); return orig_pte; } @@ -165,64 +207,51 @@ static void clear_flush(struct mm_struct *mm, unsigned long i, saddr = addr; for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) - pte_clear(mm, addr, ptep); + __ptep_get_and_clear_anysz(mm, ptep, pgsize); - flush_tlb_range(&vma, saddr, addr); + if (mm == &init_mm) + flush_tlb_kernel_range(saddr, addr); + else + __flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, true); } void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) + pte_t *ptep, pte_t pte, unsigned long sz) { size_t pgsize; int i; int ncontig; - unsigned long pfn, dpfn; - pgprot_t hugeprot; - /* - * Code needs to be expanded to handle huge swap and migration - * entries. Needed for HUGETLB and MEMORY_FAILURE. - */ - WARN_ON(!pte_present(pte)); + ncontig = num_contig_ptes(sz, &pgsize); - if (!pte_cont(pte)) { - set_pte_at(mm, addr, ptep, pte); + if (!pte_present(pte)) { + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + __set_ptes_anysz(mm, ptep, pte, 1, pgsize); return; } - ncontig = find_num_contig(mm, addr, ptep, &pgsize); - pfn = pte_pfn(pte); - dpfn = pgsize >> PAGE_SHIFT; - hugeprot = pte_pgprot(pte); - - clear_flush(mm, addr, ptep, pgsize, ncontig); + /* Only need to "break" if transitioning valid -> valid. 
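get_clear_contig() above clears every PTE in the contiguous range while folding the hardware-managed dirty/young bits into a single return value. A toy model with software-defined PTE bits (the present-bit check is elided for brevity):

#include <stdint.h>
#include <stdio.h>

#define PTE_DIRTY	0x1	/* illustrative bit positions */
#define PTE_YOUNG	0x2

/* Clear ncontig entries, folding dirty/young into the first one. */
static uint64_t get_clear_contig(uint64_t *ptep, int ncontig)
{
	uint64_t pte = *ptep;

	*ptep = 0;
	while (--ncontig) {
		uint64_t tmp = *++ptep;

		*ptep = 0;
		pte |= tmp & (PTE_DIRTY | PTE_YOUNG);
	}
	return pte;
}

int main(void)
{
	uint64_t ptes[4] = { 0x100, 0x100, 0x100 | PTE_DIRTY, 0x100 | PTE_YOUNG };

	printf("folded = %#llx\n",
	       (unsigned long long)get_clear_contig(ptes, 4));
	return 0;
}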
*/ + if (pte_cont(pte) && pte_valid(__ptep_get(ptep))) + clear_flush(mm, addr, ptep, pgsize, ncontig); - for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); + __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize); } -void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte, unsigned long sz) -{ - int i, ncontig; - size_t pgsize; - - ncontig = num_contig_ptes(sz, &pgsize); - - for (i = 0; i < ncontig; i++, ptep++) - set_pte(ptep, pte); -} - -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; + p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep = NULL; pgdp = pgd_offset(mm, addr); - pudp = pud_alloc(mm, pgdp, addr); + p4dp = p4d_alloc(mm, pgdp, addr); + if (!p4dp) + return NULL; + + pudp = pud_alloc(mm, p4dp, addr); if (!pudp) return NULL; @@ -230,20 +259,14 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, ptep = (pte_t *)pudp; } else if (sz == (CONT_PTE_SIZE)) { pmdp = pmd_alloc(mm, pudp, addr); + if (!pmdp) + return NULL; WARN_ON(addr & (sz - 1)); - /* - * Note that if this code were ever ported to the - * 32-bit arm platform then it will cause trouble in - * the case where CONFIG_HIGHPTE is set, since there - * will be no pte_unmap() to correspond with this - * pte_alloc_map(). - */ - ptep = pte_alloc_map(mm, pmdp, addr); + ptep = pte_alloc_huge(mm, pmdp, addr); } else if (sz == PMD_SIZE) { - if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && - pud_none(READ_ONCE(*pudp))) - ptep = huge_pmd_share(mm, addr, pudp); + if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp))) + ptep = huge_pmd_share(mm, vma, addr, pudp); else ptep = (pte_t *)pmd_alloc(mm, pudp, addr); } else if (sz == (CONT_PMD_SIZE)) { @@ -259,6 +282,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgdp; + p4d_t *p4dp; pud_t *pudp, pud; pmd_t *pmdp, pmd; @@ -266,12 +290,16 @@ pte_t *huge_pte_offset(struct mm_struct *mm, if (!pgd_present(READ_ONCE(*pgdp))) return NULL; - pudp = pud_offset(pgdp, addr); + p4dp = p4d_offset(pgdp, addr); + if (!p4d_present(READ_ONCE(*p4dp))) + return NULL; + + pudp = pud_offset(p4dp, addr); pud = READ_ONCE(*pudp); if (sz != PUD_SIZE && pud_none(pud)) return NULL; /* hugepage or swap? 
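huge_pte_alloc() and huge_pte_offset() select the table level purely from the huge page size, per the support matrix near the top of this file. A sketch of that mapping for the 4K-granule column (sizes hard-coded):

#include <stdio.h>

/* 4K granule: CONT_PTE=64K, PMD=2M, CONT_PMD=32M, PUD=1G. */
static const char *hugepage_level(unsigned long size)
{
	switch (size) {
	case 1UL << 30: return "pud (block)";
	case 32UL << 20: return "cont-pmd (16 x 2M)";
	case 2UL << 20: return "pmd (block)";
	case 64UL << 10: return "cont-pte (16 x 4K)";
	default: return "unsupported";
	}
}

int main(void)
{
	printf("%s\n", hugepage_level(32UL << 20));
	return 0;
}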
*/ - if (pud_huge(pud) || !pud_present(pud)) + if (pud_leaf(pud) || !pud_present(pud)) return (pte_t *)pudp; /* table; check the next level */ @@ -283,28 +311,61 @@ pte_t *huge_pte_offset(struct mm_struct *mm, if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && pmd_none(pmd)) return NULL; - if (pmd_huge(pmd) || !pmd_present(pmd)) + if (pmd_leaf(pmd) || !pmd_present(pmd)) return (pte_t *)pmdp; if (sz == CONT_PTE_SIZE) - return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK)); + return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK)); return NULL; } -pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, - struct page *page, int writable) +unsigned long hugetlb_mask_last_page(struct hstate *h) { - size_t pagesize = huge_page_size(hstate_vma(vma)); + unsigned long hp_size = huge_page_size(h); - if (pagesize == CONT_PTE_SIZE) { - entry = pte_mkcont(entry); - } else if (pagesize == CONT_PMD_SIZE) { - entry = pmd_pte(pmd_mkcont(pte_pmd(entry))); - } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) { - pr_warn("%s: unrecognized huge page size 0x%lx\n", - __func__, pagesize); + switch (hp_size) { +#ifndef __PAGETABLE_PMD_FOLDED + case PUD_SIZE: + if (pud_sect_supported()) + return PGDIR_SIZE - PUD_SIZE; + break; +#endif + case CONT_PMD_SIZE: + return PUD_SIZE - CONT_PMD_SIZE; + case PMD_SIZE: + return PUD_SIZE - PMD_SIZE; + case CONT_PTE_SIZE: + return PMD_SIZE - CONT_PTE_SIZE; + default: + break; + } + + return 0UL; +} + +pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) +{ + size_t pagesize = 1UL << shift; + + switch (pagesize) { +#ifndef __PAGETABLE_PMD_FOLDED + case PUD_SIZE: + if (pud_sect_supported()) + return pud_pte(pud_mkhuge(pte_pud(entry))); + break; +#endif + case CONT_PMD_SIZE: + return pmd_pte(pmd_mkhuge(pmd_mkcont(pte_pmd(entry)))); + case PMD_SIZE: + return pmd_pte(pmd_mkhuge(pte_pmd(entry))); + case CONT_PTE_SIZE: + return pte_mkcont(entry); + default: + break; } + pr_warn("%s: unrecognized huge page size 0x%lx\n", + __func__, pagesize); return entry; } @@ -317,22 +378,17 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr, ncontig = num_contig_ptes(sz, &pgsize); for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) - pte_clear(mm, addr, ptep); + __pte_clear(mm, addr, ptep); } -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) { int ncontig; size_t pgsize; - pte_t orig_pte = huge_ptep_get(ptep); - if (!pte_cont(orig_pte)) - return ptep_get_and_clear(mm, addr, ptep); - - ncontig = find_num_contig(mm, addr, ptep, &pgsize); - - return get_clear_flush(mm, addr, ptep, pgsize, ncontig); + ncontig = num_contig_ptes(sz, &pgsize); + return get_clear_contig(mm, addr, ptep, pgsize, ncontig); } /* @@ -348,11 +404,11 @@ static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig) { int i; - if (pte_write(pte) != pte_write(huge_ptep_get(ptep))) + if (pte_write(pte) != pte_write(__ptep_get(ptep))) return 1; for (i = 0; i < ncontig; i++) { - pte_t orig_pte = huge_ptep_get(ptep + i); + pte_t orig_pte = __ptep_get(ptep + i); if (pte_dirty(pte) != pte_dirty(orig_pte)) return 1; @@ -368,22 +424,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { - int ncontig, i; + int ncontig; size_t pgsize = 0; - unsigned long pfn = pte_pfn(pte), dpfn; - pgprot_t hugeprot; + struct mm_struct *mm = vma->vm_mm; pte_t orig_pte; + 
VM_WARN_ON(!pte_present(pte)); + if (!pte_cont(pte)) - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + return __ptep_set_access_flags(vma, addr, ptep, pte, dirty); - ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize); - dpfn = pgsize >> PAGE_SHIFT; + ncontig = num_contig_ptes(huge_page_size(hstate_vma(vma)), &pgsize); if (!__cont_access_flags_changed(ptep, pte, ncontig)) return 0; - orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig); + orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig); + VM_WARN_ON(!pte_present(orig_pte)); /* Make sure we don't lose the dirty or young state */ if (pte_dirty(orig_pte)) @@ -392,93 +449,94 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, if (pte_young(orig_pte)) pte = pte_mkyoung(pte); - hugeprot = pte_pgprot(pte); - for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot)); - + __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize); return 1; } void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long pfn, dpfn; - pgprot_t hugeprot; - int ncontig, i; + int ncontig; size_t pgsize; pte_t pte; - if (!pte_cont(READ_ONCE(*ptep))) { - ptep_set_wrprotect(mm, addr, ptep); + pte = __ptep_get(ptep); + VM_WARN_ON(!pte_present(pte)); + + if (!pte_cont(pte)) { + __ptep_set_wrprotect(mm, addr, ptep); return; } ncontig = find_num_contig(mm, addr, ptep, &pgsize); - dpfn = pgsize >> PAGE_SHIFT; - pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig); + pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig); pte = pte_wrprotect(pte); - hugeprot = pte_pgprot(pte); - pfn = pte_pfn(pte); - - for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); + __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize); } -void huge_ptep_clear_flush(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep) +pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) { + struct mm_struct *mm = vma->vm_mm; size_t pgsize; int ncontig; - if (!pte_cont(READ_ONCE(*ptep))) { - ptep_clear_flush(vma, addr, ptep); - return; - } - - ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize); - clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig); -} - -static void __init add_huge_page_size(unsigned long size) -{ - if (size_to_hstate(size)) - return; - - hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); + ncontig = num_contig_ptes(huge_page_size(hstate_vma(vma)), &pgsize); + return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig); } static int __init hugetlbpage_init(void) { -#ifdef CONFIG_ARM64_4K_PAGES - add_huge_page_size(PUD_SIZE); -#endif - add_huge_page_size(CONT_PMD_SIZE); - add_huge_page_size(PMD_SIZE); - add_huge_page_size(CONT_PTE_SIZE); + /* + * HugeTLB pages are supported on maximum four page table + * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base + * page size, corresponding to hugetlb_add_hstate() calls + * here. + * + * HUGE_MAX_HSTATE should at least match maximum supported + * HugeTLB page sizes on the platform. Any new addition to + * supported HugeTLB page sizes will also require changing + * HUGE_MAX_HSTATE as well. 
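The hugetlb_add_hstate() calls above register each size by its order, i.e. shift minus PAGE_SHIFT. For a 4K base page the arithmetic works out as below (shift values hard-coded for the 4K-granule case):

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4K base pages */

static void add_hstate(const char *name, int shift)
{
	/* order = log2(huge page size / base page size) */
	printf("%-8s shift=%2d order=%2d size=%luK\n",
	       name, shift, shift - PAGE_SHIFT,
	       (1UL << shift) >> 10);
}

int main(void)
{
	add_hstate("pud", 30);		/* PUD_SHIFT      -> 1G  */
	add_hstate("cont-pmd", 25);	/* CONT_PMD_SHIFT -> 32M */
	add_hstate("pmd", 21);		/* PMD_SHIFT      -> 2M  */
	add_hstate("cont-pte", 16);	/* CONT_PTE_SHIFT -> 64K */
	return 0;
}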
+ */ + BUILD_BUG_ON(HUGE_MAX_HSTATE < 4); + if (pud_sect_supported()) + hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); + + hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT); + hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); + hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT); return 0; } arch_initcall(hugetlbpage_init); -static __init int setup_hugepagesz(char *opt) +bool __init arch_hugetlb_valid_size(unsigned long size) { - unsigned long ps = memparse(opt, &opt); + return __hugetlb_valid_size(size); +} - switch (ps) { -#ifdef CONFIG_ARM64_4K_PAGES - case PUD_SIZE: -#endif - case CONT_PMD_SIZE: - case PMD_SIZE: - case CONT_PTE_SIZE: - add_huge_page_size(ps); - return 1; +pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) +{ + unsigned long psize = huge_page_size(hstate_vma(vma)); + + if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) { + /* + * Break-before-make (BBM) is required for all user space mappings + * when the permission changes from executable to non-executable + * in cases where cpu is affected with errata #2645198. + */ + if (pte_user_exec(__ptep_get(ptep))) + return huge_ptep_clear_flush(vma, addr, ptep); } + return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize); +} - hugetlb_bad_size(); - pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); - return 0; +void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, + pte_t old_pte, pte_t pte) +{ + unsigned long psize = huge_page_size(hstate_vma(vma)); + + set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize); } -__setup("hugepagesz=", setup_hugepagesz); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index b65dffdfb201..0c8c35dd645e 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -16,33 +16,38 @@ #include <linux/nodemask.h> #include <linux/initrd.h> #include <linux/gfp.h> +#include <linux/math.h> #include <linux/memblock.h> #include <linux/sort.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/dma-direct.h> -#include <linux/dma-mapping.h> -#include <linux/dma-contiguous.h> +#include <linux/dma-map-ops.h> #include <linux/efi.h> #include <linux/swiotlb.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/kexec.h> #include <linux/crash_dump.h> +#include <linux/hugetlb.h> +#include <linux/acpi_iort.h> +#include <linux/kmemleak.h> +#include <linux/execmem.h> #include <asm/boot.h> #include <asm/fixmap.h> #include <asm/kasan.h> #include <asm/kernel-pgtable.h> +#include <asm/kvm_host.h> #include <asm/memory.h> #include <asm/numa.h> +#include <asm/rsi.h> #include <asm/sections.h> #include <asm/setup.h> #include <linux/sizes.h> #include <asm/tlb.h> #include <asm/alternative.h> - -#define ARM64_ZONE_DMA_BITS 30 +#include <asm/xen/swiotlb-xen.h> /* * We need to be able to catch inadvertent references to memstart_addr @@ -53,232 +58,114 @@ s64 memstart_addr __ro_after_init = -1; EXPORT_SYMBOL(memstart_addr); -s64 physvirt_offset __ro_after_init; -EXPORT_SYMBOL(physvirt_offset); - -struct page *vmemmap __ro_after_init; -EXPORT_SYMBOL(vmemmap); - /* - * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of - * memory as some devices, namely the Raspberry Pi 4, have peripherals with - * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32 - * bit addressable memory area. + * If the corresponding config options are enabled, we create both ZONE_DMA + * and ZONE_DMA32. 
By default ZONE_DMA covers the 32-bit addressable memory + unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4). + If it is restricted, ZONE_DMA32 covers the rest of the 32-bit addressable + memory; otherwise it is empty. */ -phys_addr_t arm64_dma_phys_limit __ro_after_init; -static phys_addr_t arm64_dma32_phys_limit __ro_after_init; +phys_addr_t __ro_after_init arm64_dma_phys_limit; -#ifdef CONFIG_KEXEC_CORE /* - * reserve_crashkernel() - reserves memory for crash kernel - * - * This function reserves memory area given in "crashkernel=" kernel command - * line parameter. The memory reserved is used by dump capture kernel when - * primary kernel is crashing. + * To make optimal use of block mappings when laying out the linear + * mapping, round down the base of physical memory to a size that can + * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE + * (64k granule), or a multiple that can be mapped using contiguous bits + * in the page tables: 32 * PMD_SIZE (16k granule) */ -static void __init reserve_crashkernel(void) -{ - unsigned long long crash_base, crash_size; - int ret; - - ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), - &crash_size, &crash_base); - /* no crashkernel= or invalid value specified */ - if (ret || !crash_size) - return; - - crash_size = PAGE_ALIGN(crash_size); - - if (crash_base == 0) { - /* Current arm64 boot protocol requires 2MB alignment */ - crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit, - crash_size, SZ_2M); - if (crash_base == 0) { - pr_warn("cannot allocate crashkernel (size:0x%llx)\n", - crash_size); - return; - } - } else { - /* User specifies base address explicitly. */ - if (!memblock_is_region_memory(crash_base, crash_size)) { - pr_warn("cannot reserve crashkernel: region is not memory\n"); - return; - } - - if (memblock_is_region_reserved(crash_base, crash_size)) { - pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n"); - return; - } - - if (!IS_ALIGNED(crash_base, SZ_2M)) { - pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n"); - return; - } - } - memblock_reserve(crash_base, crash_size); - - pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n", - crash_base, crash_base + crash_size, crash_size >> 20); - - crashk_res.start = crash_base; - crashk_res.end = crash_base + crash_size - 1; -} +#if defined(CONFIG_ARM64_4K_PAGES) +#define ARM64_MEMSTART_SHIFT PUD_SHIFT +#elif defined(CONFIG_ARM64_16K_PAGES) +#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT #else -static void __init reserve_crashkernel(void) -{ -} -#endif /* CONFIG_KEXEC_CORE */ - -#ifdef CONFIG_CRASH_DUMP -static int __init early_init_dt_scan_elfcorehdr(unsigned long node, - const char *uname, int depth, void *data) -{ - const __be32 *reg; - int len; - - if (depth != 1 || strcmp(uname, "chosen") != 0) - return 0; - - reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len); - if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells))) - return 1; - - elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg); - elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg); - - return 1; -} +#define ARM64_MEMSTART_SHIFT PMD_SHIFT +#endif /* - * reserve_elfcorehdr() - reserves memory for elf core header - * - * This function reserves the memory occupied by an elf core header - * described in the device tree. This region contains all the - * information about primary kernel's core image and is used by a dump - * capture kernel to access the system memory on primary kernel.
+ * sparsemem vmemmap imposes an additional requirement on the alignment of + * memstart_addr, due to the fact that the base of the vmemmap region + * has a direct correspondence with the base of physical memory, and so + * needs to appear sufficiently aligned in the virtual address space. */ -static void __init reserve_elfcorehdr(void) +#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS +#define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS) +#else +#define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT) +#endif + +static void __init arch_reserve_crashkernel(void) { - of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL); + unsigned long long low_size = 0; + unsigned long long crash_base, crash_size; + bool high = false; + int ret; - if (!elfcorehdr_size) + if (!IS_ENABLED(CONFIG_CRASH_RESERVE)) return; - if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) { - pr_warn("elfcorehdr is overlapped\n"); + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + &crash_size, &crash_base, + &low_size, &high); + if (ret) return; - } - - memblock_reserve(elfcorehdr_addr, elfcorehdr_size); - pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n", - elfcorehdr_size >> 10, elfcorehdr_addr); + reserve_crashkernel_generic(crash_size, crash_base, low_size, high); } -#else -static void __init reserve_elfcorehdr(void) -{ -} -#endif /* CONFIG_CRASH_DUMP */ -/* - * Return the maximum physical address for a zone with a given address size - * limit. It currently assumes that for memory starting above 4G, 32-bit - * devices will use a DMA offset. - */ -static phys_addr_t __init max_zone_phys(unsigned int zone_bits) +static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit) { - phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits); - return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM()); + return min(zone_limit, memblock_end_of_DRAM() - 1) + 1; } -#ifdef CONFIG_NUMA - -static void __init zone_sizes_init(unsigned long min, unsigned long max) +static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES] = {0}; + phys_addr_t __maybe_unused acpi_zone_dma_limit; + phys_addr_t __maybe_unused dt_zone_dma_limit; + phys_addr_t __maybe_unused dma32_phys_limit = + max_zone_phys(DMA_BIT_MASK(32)); #ifdef CONFIG_ZONE_DMA + acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address(); + dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL); + zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit); + /* + * Information we get from firmware (e.g. DT dma-ranges) describes DMA + * bus constraints. Devices using DMA might have their own limitations. + * Some of them rely on a DMA zone in low 32-bit memory. Keep a low-RAM + * DMA zone on platforms that have RAM there.
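+ * + * Illustrative example (editorial, not part of the original comment): + * on a Raspberry Pi 4 the DT limits device DMA to 30 bits, so + * zone_dma_limit resolves to 0x3fffffff, ZONE_DMA covers the first 1G + * of RAM, and ZONE_DMA32 covers the rest of the low 4G.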
+ */ + if (memblock_start_of_DRAM() < U32_MAX) + zone_dma_limit = min(zone_dma_limit, U32_MAX); + arm64_dma_phys_limit = max_zone_phys(zone_dma_limit); max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit); #endif #ifdef CONFIG_ZONE_DMA32 - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit); + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit); + if (!arm64_dma_phys_limit) + arm64_dma_phys_limit = dma32_phys_limit; #endif - max_zone_pfns[ZONE_NORMAL] = max; + if (!arm64_dma_phys_limit) + arm64_dma_phys_limit = PHYS_MASK + 1; + max_zone_pfns[ZONE_NORMAL] = max_pfn; - free_area_init_nodes(max_zone_pfns); + free_area_init(max_zone_pfns); } -#else - -static void __init zone_sizes_init(unsigned long min, unsigned long max) +int pfn_is_map_memory(unsigned long pfn) { - struct memblock_region *reg; - unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; - unsigned long __maybe_unused max_dma, max_dma32; + phys_addr_t addr = PFN_PHYS(pfn); - memset(zone_size, 0, sizeof(zone_size)); - - max_dma = max_dma32 = min; -#ifdef CONFIG_ZONE_DMA - max_dma = max_dma32 = PFN_DOWN(arm64_dma_phys_limit); - zone_size[ZONE_DMA] = max_dma - min; -#endif -#ifdef CONFIG_ZONE_DMA32 - max_dma32 = PFN_DOWN(arm64_dma32_phys_limit); - zone_size[ZONE_DMA32] = max_dma32 - max_dma; -#endif - zone_size[ZONE_NORMAL] = max - max_dma32; - - memcpy(zhole_size, zone_size, sizeof(zhole_size)); - - for_each_memblock(memory, reg) { - unsigned long start = memblock_region_memory_base_pfn(reg); - unsigned long end = memblock_region_memory_end_pfn(reg); - -#ifdef CONFIG_ZONE_DMA - if (start >= min && start < max_dma) { - unsigned long dma_end = min(end, max_dma); - zhole_size[ZONE_DMA] -= dma_end - start; - start = dma_end; - } -#endif -#ifdef CONFIG_ZONE_DMA32 - if (start >= max_dma && start < max_dma32) { - unsigned long dma32_end = min(end, max_dma32); - zhole_size[ZONE_DMA32] -= dma32_end - start; - start = dma32_end; - } -#endif - if (start >= max_dma32 && start < max) { - unsigned long normal_end = min(end, max); - zhole_size[ZONE_NORMAL] -= normal_end - start; - } - } - - free_area_init_node(0, zone_size, min, zhole_size); -} - -#endif /* CONFIG_NUMA */ - -int pfn_valid(unsigned long pfn) -{ - phys_addr_t addr = pfn << PAGE_SHIFT; - - if ((addr >> PAGE_SHIFT) != pfn) - return 0; - -#ifdef CONFIG_SPARSEMEM - if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + /* avoid false positives for bogus PFNs, see comment in pfn_valid() */ + if (PHYS_PFN(addr) != pfn) return 0; - if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn)))) - return 0; -#endif return memblock_is_map_memory(addr); } -EXPORT_SYMBOL(pfn_valid); +EXPORT_SYMBOL(pfn_is_map_memory); -static phys_addr_t memory_limit = PHYS_ADDR_MAX; +static phys_addr_t memory_limit __ro_after_init = PHYS_ADDR_MAX; /* * Limit the memory size that was specified via FDT. 
@@ -295,44 +182,23 @@ static int __init early_mem(char *p) } early_param("mem", early_mem); -static int __init early_init_dt_scan_usablemem(unsigned long node, - const char *uname, int depth, void *data) -{ - struct memblock_region *usablemem = data; - const __be32 *reg; - int len; - - if (depth != 1 || strcmp(uname, "chosen") != 0) - return 0; - - reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len); - if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells))) - return 1; - - usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg); - usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg); - - return 1; -} - -static void __init fdt_enforce_memory_region(void) -{ - struct memblock_region reg = { - .size = 0, - }; - - of_scan_flat_dt(early_init_dt_scan_usablemem, &reg); - - if (reg.size) - memblock_cap_memory_range(reg.base, reg.size); -} - void __init arm64_memblock_init(void) { - const s64 linear_region_size = BIT(vabits_actual - 1); + s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual); - /* Handle linux,usable-memory-range property */ - fdt_enforce_memory_region(); + /* + * Corner case: 52-bit VA capable systems running KVM in nVHE mode may + * be limited in their ability to support a linear map that exceeds 51 + * bits of VA space, depending on the placement of the ID map. Given + * that the placement of the ID map may be randomized, let's simply + * limit the kernel's linear map to 51 bits as well if we detect this + * configuration. + */ + if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 && + is_hyp_mode_available() && !is_kernel_in_hyp_mode()) { + pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n"); + linear_region_size = min_t(u64, linear_region_size, BIT(51)); + } /* Remove memory above our supported physical address size */ memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX); @@ -343,19 +209,8 @@ void __init arm64_memblock_init(void) memstart_addr = round_down(memblock_start_of_DRAM(), ARM64_MEMSTART_ALIGN); - physvirt_offset = PHYS_OFFSET - PAGE_OFFSET; - - vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)); - - /* - * If we are running with a 52-bit kernel VA config on a system that - * does not support it, we have to offset our vmemmap and physvirt_offset - * s.t. we avoid the 52-bit portion of the direct linear map - */ - if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) { - vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT; - physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48); - } + if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size) + pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n"); /* * Remove the memory that we will not be able to cover with the @@ -372,6 +227,16 @@ void __init arm64_memblock_init(void) } /* + * If we are running with a 52-bit kernel VA config on a system that + * does not support it, we have to place the available physical + * memory in the 48-bit addressable part of the linear region, i.e., + * we have to move it upward. Since memstart_addr represents the + * physical address of PAGE_OFFSET, we have to *subtract* from it. + */ + if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) + memstart_addr -= _PAGE_OFFSET(vabits_actual) - _PAGE_OFFSET(52); + + /* * Apply the memory limit if it was set. Since the kernel may be loaded * high up in memory, add back the kernel region that must be accessible * via the linear mapping.
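 * For example (an illustration, not part of this hunk): booting with
 * mem=4G caps usable memory at 4G, while the kernel image region is
 * added back even if it was loaded above that limit, so that it stays
 * accessible via the linear map.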
@@ -404,34 +269,17 @@ void __init arm64_memblock_init(void) "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) { phys_initrd_size = 0; } else { - memblock_remove(base, size); /* clear MEMBLOCK_ flags */ memblock_add(base, size); + memblock_clear_nomap(base, size); memblock_reserve(base, size); } } - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { - extern u16 memstart_offset_seed; - u64 range = linear_region_size - - (memblock_end_of_DRAM() - memblock_start_of_DRAM()); - - /* - * If the size of the linear region exceeds, by a sufficient - * margin, the size of the region that the available physical - * memory spans, randomize the linear region as well. - */ - if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) { - range /= ARM64_MEMSTART_ALIGN; - memstart_addr -= ARM64_MEMSTART_ALIGN * - ((range * memstart_offset_seed) >> 16); - } - } - /* * Register the kernel text, kernel data, initrd, and initial * pagetables with memblock. */ - memblock_reserve(__pa_symbol(_text), _end - _text); + memblock_reserve(__pa_symbol(_stext), _end - _stext); if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) { /* the generic initrd code expects virtual addresses */ initrd_start = __phys_to_virt(phys_initrd_start); @@ -439,24 +287,6 @@ void __init arm64_memblock_init(void) } early_init_fdt_scan_reserved_mem(); - - if (IS_ENABLED(CONFIG_ZONE_DMA)) { - zone_dma_bits = ARM64_ZONE_DMA_BITS; - arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS); - } - - if (IS_ENABLED(CONFIG_ZONE_DMA32)) - arm64_dma32_phys_limit = max_zone_phys(32); - else - arm64_dma32_phys_limit = PHYS_MASK + 1; - - reserve_crashkernel(); - - reserve_elfcorehdr(); - - high_memory = __va(memblock_end_of_DRAM() - 1) + 1; - - dma_contiguous_reserve(arm64_dma32_phys_limit); } void __init bootmem_init(void) @@ -471,109 +301,63 @@ void __init bootmem_init(void) max_pfn = max_low_pfn = max; min_low_pfn = min; - arm64_numa_init(); + arch_numa_init(); + /* - * Sparsemem tries to allocate bootmem in memory_present(), so must be - * done after the fixed reservations. + * This must be done after arch_numa_init(), which calls numa_init() to + * initialize node_online_map; hugetlb_cma_reserve() uses it when + * allocating the required CMA size across online nodes. */ - memblocks_present(); - - sparse_init(); - zone_sizes_init(min, max); +#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA) + arm64_hugetlb_cma_reserve(); +#endif - memblock_dump_all(); -} - -#ifndef CONFIG_SPARSEMEM_VMEMMAP -static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) -{ - struct page *start_pg, *end_pg; - unsigned long pg, pgend; + kvm_hyp_reserve(); /* - * Convert start_pfn/end_pfn to a struct page pointer. + * sparse_init() tries to allocate memory from memblock, so it must be + * done after the fixed reservations */ - start_pg = pfn_to_page(start_pfn - 1) + 1; - end_pg = pfn_to_page(end_pfn - 1) + 1; + sparse_init(); + zone_sizes_init(); /* - * Convert to physical addresses, and round start upwards and end - * downwards. + * Reserve the CMA area after arm64_dma_phys_limit has been initialised. */ - pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); - pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; + dma_contiguous_reserve(arm64_dma_phys_limit); /* - * If there are free pages between these, free the section of the - * memmap array. + * request_standard_resources() depends on crashkernel's memory being + * reserved, so do it here.
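+ * + * (One observable effect — an illustration, not from the original + * comment: the "Crash kernel" resource published in /proc/iomem by + * request_standard_resources() only appears because this reservation + * has already run.)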
*/ - if (pg < pgend) - memblock_free(pg, pgend - pg); + arch_reserve_crashkernel(); + + memblock_dump_all(); } -/* - * The mem_map array can get very big. Free the unused area of the memory map. - */ -static void __init free_unused_memmap(void) +void __init arch_mm_preinit(void) { - unsigned long start, prev_end = 0; - struct memblock_region *reg; + unsigned int flags = SWIOTLB_VERBOSE; + bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit); - for_each_memblock(memory, reg) { - start = __phys_to_pfn(reg->base); - -#ifdef CONFIG_SPARSEMEM - /* - * Take care not to free memmap entries that don't exist due - * to SPARSEMEM sections which aren't present. - */ - start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); -#endif - /* - * If we had a previous bank, and there is a space between the - * current bank and the previous, free it. - */ - if (prev_end && prev_end < start) - free_memmap(prev_end, start); + if (is_realm_world()) { + swiotlb = true; + flags |= SWIOTLB_FORCE; + } + if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) { /* - * Align up here since the VM subsystem insists that the - * memmap entries are valid from the bank end aligned to - * MAX_ORDER_NR_PAGES. + * If no bouncing is needed for ZONE_DMA, reduce the swiotlb + * buffer for kmalloc() bouncing to 1MB per 1GB of RAM. */ - prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size), - MAX_ORDER_NR_PAGES); + unsigned long size = + DIV_ROUND_UP(memblock_phys_mem_size(), 1024); + swiotlb_adjust_size(min(swiotlb_size_or_default(), size)); + swiotlb = true; } -#ifdef CONFIG_SPARSEMEM - if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) - free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); -#endif -} -#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ - -/* - * mem_init() marks the free areas in the mem_map and tells us how much memory - * is free. This is done after various parts of the system have claimed their - * memory after the kernel image. - */ -void __init mem_init(void) -{ - if (swiotlb_force == SWIOTLB_FORCE || - max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit)) - swiotlb_init(1); - else - swiotlb_force = SWIOTLB_NO_FORCE; - - set_max_mapnr(max_pfn - PHYS_PFN_OFFSET); - -#ifndef CONFIG_SPARSEMEM_VMEMMAP - free_unused_memmap(); -#endif - /* this will put all unused low memory onto the freelists */ - memblock_free_all(); - - mem_init_print_info(NULL); + swiotlb_init(swiotlb, flags); + swiotlb_update_mem_attributes(); /* * Check boundaries twice: Some fundamental inconsistencies can be @@ -583,6 +367,13 @@ void __init mem_init(void) BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64); #endif + /* + * Selected page table levels should match when derived from + * scratch using the virtual address range and page size. + */ + BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) != + CONFIG_PGTABLE_LEVELS); + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; /* @@ -595,38 +386,169 @@ void __init mem_init(void) void free_initmem(void) { - free_reserved_area(lm_alias(__init_begin), - lm_alias(__init_end), + void *lm_init_begin = lm_alias(__init_begin); + void *lm_init_end = lm_alias(__init_end); + + WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE)); + WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE)); + + /* Delete __init region from memblock.reserved.
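+ * (Editorial note: memblock_free() below only updates the memblock + * bookkeeping; the pages themselves are poisoned and handed to the + * buddy allocator by free_reserved_area().)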
*/ + memblock_free(lm_init_begin, lm_init_end - lm_init_begin); + + free_reserved_area(lm_init_begin, lm_init_end, POISON_FREE_INITMEM, "unused kernel"); /* * Unmap the __init region but leave the VM area in place. This * prevents the region from being reused for kernel modules, which * is not supported by kallsyms. */ - unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin)); + vunmap_range((u64)__init_begin, (u64)__init_end); } -/* - * Dump out memory limit information on panic. - */ -static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p) +void dump_mem_limit(void) { if (memory_limit != PHYS_ADDR_MAX) { pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20); } else { pr_emerg("Memory Limit: none\n"); } - return 0; } -static struct notifier_block mem_limit_notifier = { - .notifier_call = dump_mem_limit, -}; +#ifdef CONFIG_EXECMEM +static u64 module_direct_base __ro_after_init = 0; +static u64 module_plt_base __ro_after_init = 0; -static int __init register_mem_limit_dumper(void) +/* + * Choose a random page-aligned base address for a window of 'size' bytes which + * entirely contains the interval [start, end - 1]. + */ +static u64 __init random_bounding_box(u64 size, u64 start, u64 end) { - atomic_notifier_chain_register(&panic_notifier_list, - &mem_limit_notifier); + u64 max_pgoff, pgoff; + + if ((end - start) >= size) + return 0; + + max_pgoff = (size - (end - start)) / PAGE_SIZE; + pgoff = get_random_u32_inclusive(0, max_pgoff); + + return start - pgoff * PAGE_SIZE; +} + +/* + * Modules may directly reference data and text anywhere within the kernel + * image and other modules. References using PREL32 relocations have a +/-2G + * range, and so we need to ensure that the entire kernel image and all modules + * fall within a 2G window such that these are always within range. + * + * Modules may directly branch to functions and code within the kernel text, + * and to functions and code within other modules. These branches will use + * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure + * that the entire kernel text and all module text falls within a 128M window + * such that these are always within range. With PLTs, we can expand this to a + * 2G window. + * + * We chose the 128M region to surround the entire kernel image (rather than + * just the text) as using the same bounds for the 128M and 2G regions ensures + * by construction that we never select a 128M region that is not a subset of + * the 2G region. For very large and unusual kernel configurations this means + * we may fall back to PLTs where they could have been avoided, but this keeps + * the logic significantly simpler. + */ +static int __init module_init_limits(void) +{ + u64 kernel_end = (u64)_end; + u64 kernel_start = (u64)_text; + u64 kernel_size = kernel_end - kernel_start; + + /* + * The default modules region is placed immediately below the kernel + * image, and is large enough to use the full 2G relocation range. 
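+ * + * A worked example (illustrative, not from the original comment): with + * KASLR disabled and a kernel image smaller than 128M, the code below + * sets module_direct_base = kernel_end - SZ_128M, so the window + * [module_direct_base, module_direct_base + SZ_128M) ends at kernel_end + * and contains the whole image; module_plt_base is derived the same + * way with SZ_2G.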
+ */ + BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END); + BUILD_BUG_ON(MODULES_VSIZE < SZ_2G); + + if (!kaslr_enabled()) { + if (kernel_size < SZ_128M) + module_direct_base = kernel_end - SZ_128M; + if (kernel_size < SZ_2G) + module_plt_base = kernel_end - SZ_2G; + } else { + u64 min = kernel_start; + u64 max = kernel_end; + + if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { + pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n"); + } else { + module_direct_base = random_bounding_box(SZ_128M, min, max); + if (module_direct_base) { + min = module_direct_base; + max = module_direct_base + SZ_128M; + } + } + + module_plt_base = random_bounding_box(SZ_2G, min, max); + } + + pr_info("%llu pages in range for non-PLT usage", + module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0); + pr_info("%llu pages in range for PLT usage", + module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0); + return 0; } -__initcall(register_mem_limit_dumper); + +static struct execmem_info execmem_info __ro_after_init; + +struct execmem_info __init *execmem_arch_setup(void) +{ + unsigned long fallback_start = 0, fallback_end = 0; + unsigned long start = 0, end = 0; + + module_init_limits(); + + /* + * Where possible, prefer to allocate within direct branch range of the + * kernel such that no PLTs are necessary. + */ + if (module_direct_base) { + start = module_direct_base; + end = module_direct_base + SZ_128M; + + if (module_plt_base) { + fallback_start = module_plt_base; + fallback_end = module_plt_base + SZ_2G; + } + } else if (module_plt_base) { + start = module_plt_base; + end = module_plt_base + SZ_2G; + } + + execmem_info = (struct execmem_info){ + .ranges = { + [EXECMEM_DEFAULT] = { + .start = start, + .end = end, + .pgprot = PAGE_KERNEL, + .alignment = 1, + .fallback_start = fallback_start, + .fallback_end = fallback_end, + }, + [EXECMEM_KPROBES] = { + .start = VMALLOC_START, + .end = VMALLOC_END, + .pgprot = PAGE_KERNEL_ROX, + .alignment = 1, + }, + [EXECMEM_BPF] = { + .start = VMALLOC_START, + .end = VMALLOC_END, + .pgprot = PAGE_KERNEL, + .alignment = 1, + }, + }, + }; + + return &execmem_info; +} +#endif /* CONFIG_EXECMEM */ diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index 9be71bee902c..10e246f11271 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c @@ -1,97 +1,44 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * Based on arch/arm/mm/ioremap.c - * - * (C) Copyright 1995 1996 Linus Torvalds - * Hacked for ARM by Phil Blundell <philb@gnu.org> - * Hacked to allow all architectures to build, and various cleanups - * by Russell King - * Copyright (C) 2012 ARM Ltd. - */ -#include <linux/export.h> #include <linux/mm.h> -#include <linux/vmalloc.h> #include <linux/io.h> -#include <asm/fixmap.h> -#include <asm/tlbflush.h> -#include <asm/pgalloc.h> +static ioremap_prot_hook_t ioremap_prot_hook; -static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, - pgprot_t prot, void *caller) +int arm64_ioremap_prot_hook_register(ioremap_prot_hook_t hook) { - unsigned long last_addr; - unsigned long offset = phys_addr & ~PAGE_MASK; - int err; - unsigned long addr; - struct vm_struct *area; - - /* - * Page align the mapping address and size, taking account of any - * offset. - */ - phys_addr &= PAGE_MASK; - size = PAGE_ALIGN(size + offset); + if (WARN_ON(ioremap_prot_hook)) + return -EBUSY; - /* - * Don't allow wraparound, zero size or outside PHYS_MASK. 
- */ - last_addr = phys_addr + size - 1; - if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK)) - return NULL; + ioremap_prot_hook = hook; + return 0; +} - /* - * Don't allow RAM to be mapped. - */ - if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr)))) - return NULL; +void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, + pgprot_t pgprot) +{ + unsigned long last_addr = phys_addr + size - 1; - area = get_vm_area_caller(size, VM_IOREMAP, caller); - if (!area) + /* Don't allow outside PHYS_MASK */ + if (last_addr & ~PHYS_MASK) return NULL; - addr = (unsigned long)area->addr; - area->phys_addr = phys_addr; - err = ioremap_page_range(addr, addr + size, phys_addr, prot); - if (err) { - vunmap((void *)addr); + /* Don't allow RAM to be mapped. */ + if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr)))) return NULL; - } - - return (void __iomem *)(offset + addr); -} - -void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot) -{ - return __ioremap_caller(phys_addr, size, prot, - __builtin_return_address(0)); -} -EXPORT_SYMBOL(__ioremap); - -void iounmap(volatile void __iomem *io_addr) -{ - unsigned long addr = (unsigned long)io_addr & PAGE_MASK; /* - * We could get an address outside vmalloc range in case - * of ioremap_cache() reusing a RAM mapping. + * If a hook is registered (e.g. for confidential computing + * purposes), call that now and barf if it fails. */ - if (is_vmalloc_addr((void *)addr)) - vunmap((void *)addr); -} -EXPORT_SYMBOL(iounmap); - -void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size) -{ - /* For normal memory we already have a cacheable mapping. */ - if (pfn_valid(__phys_to_pfn(phys_addr))) - return (void __iomem *)__phys_to_virt(phys_addr); + if (unlikely(ioremap_prot_hook) && + WARN_ON(ioremap_prot_hook(phys_addr, size, &pgprot))) { + return NULL; + } - return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL), - __builtin_return_address(0)); + return generic_ioremap_prot(phys_addr, size, pgprot); } -EXPORT_SYMBOL(ioremap_cache); +EXPORT_SYMBOL(ioremap_prot); /* * Must be called after early_fixmap_init @@ -100,3 +47,11 @@ void __init early_ioremap_init(void) { early_ioremap_setup(); } + +bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, + unsigned long flags) +{ + unsigned long pfn = PHYS_PFN(offset); + + return pfn_is_map_memory(pfn); +} diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index f87a32484ea8..d541ce45daeb 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -18,11 +18,12 @@ #include <asm/kernel-pgtable.h> #include <asm/page.h> #include <asm/pgalloc.h> -#include <asm/pgtable.h> #include <asm/sections.h> #include <asm/tlbflush.h> -static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE); +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) + +static pgd_t tmp_pg_dir[PTRS_PER_PTE] __initdata __aligned(PAGE_SIZE); /* * The p*d_populate functions call virt_to_phys implicitly so they can't be used @@ -35,7 +36,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node) { void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), - MEMBLOCK_ALLOC_KASAN, node); + MEMBLOCK_ALLOC_NOLEAKTRACE, node); if (!p) panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", __func__, PAGE_SIZE, PAGE_SIZE, node, @@ -48,7 +49,8 @@ static phys_addr_t __init kasan_alloc_raw_page(int node) { void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), - MEMBLOCK_ALLOC_KASAN, node); + 
MEMBLOCK_ALLOC_NOLEAKTRACE, + node); if (!p) panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", __func__, PAGE_SIZE, PAGE_SIZE, node, @@ -78,23 +80,36 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, phys_addr_t pmd_phys = early ? __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node); - __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE); + __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE); } return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr); } -static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node, +static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early) { - if (pgd_none(READ_ONCE(*pgdp))) { + if (p4d_none(READ_ONCE(*p4dp))) { phys_addr_t pud_phys = early ? __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node); - __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE); + __p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE); + } + + return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr); +} + +static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, + bool early) +{ + if (pgd_none(READ_ONCE(*pgdp))) { + phys_addr_t p4d_phys = early ? + __pa_symbol(kasan_early_shadow_p4d) + : kasan_alloc_zeroed_page(node); + __pgd_populate(pgdp, p4d_phys, PGD_TYPE_TABLE); } - return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr); + return early ? p4d_offset_kimg(pgdp, addr) : p4d_offset(pgdp, addr); } static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, @@ -110,8 +125,8 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, if (!early) memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE); next = addr + PAGE_SIZE; - set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); - } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep))); + __set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); + } while (ptep++, addr = next, addr != end && pte_none(__ptep_get(ptep))); } static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, @@ -126,11 +141,11 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp))); } -static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr, +static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr, unsigned long end, int node, bool early) { unsigned long next; - pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early); + pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early); do { next = pud_addr_end(addr, end); @@ -138,6 +153,18 @@ static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr, } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp))); } +static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr, + unsigned long end, int node, bool early) +{ + unsigned long next; + p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early); + + do { + next = p4d_addr_end(addr, end); + kasan_pud_populate(p4dp, addr, next, node, early); + } while (p4dp++, addr = next, addr != end && p4d_none(READ_ONCE(*p4dp))); +} + static void __init kasan_pgd_populate(unsigned long addr, unsigned long end, int node, bool early) { @@ -147,18 +174,52 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end, pgdp = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); - kasan_pud_populate(pgdp, addr, next, node, early); + kasan_p4d_populate(pgdp, addr, next, node, early); 
} while (pgdp++, addr = next, addr != end); } +#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS > 4 +#define SHADOW_ALIGN P4D_SIZE +#else +#define SHADOW_ALIGN PUD_SIZE +#endif + +/* + * Return whether 'addr' is aligned to the size covered by a root level + * descriptor. + */ +static bool __init root_level_aligned(u64 addr) +{ + int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 1) * PTDESC_TABLE_SHIFT; + + return (addr % (PAGE_SIZE << shift)) == 0; +} + /* The early shadow maps everything to a single page of zeroes */ asmlinkage void __init kasan_early_init(void) { BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT))); - BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE)); - BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE)); - BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)); + BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), SHADOW_ALIGN)); + BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), SHADOW_ALIGN)); + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, SHADOW_ALIGN)); + + if (!root_level_aligned(KASAN_SHADOW_START)) { + /* + * The start address is misaligned, and so the next level table + * will be shared with the linear region. This can happen with + * 4 or 5 level paging, so install a generic pte_t[] as the + * next level. This prevents the kasan_pgd_populate call below + * from inserting an entry that refers to the shared KASAN zero + * shadow pud_t[]/p4d_t[], which could end up getting corrupted + * when the linear region is mapped. + */ + static pte_t tbl[PTRS_PER_PTE] __page_aligned_bss; + pgd_t *pgdp = pgd_offset_k(KASAN_SHADOW_START); + + set_pgd(pgdp, __pgd(__pa_symbol(tbl) | PGD_TYPE_TABLE)); + } + kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true); } @@ -171,44 +232,86 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end, } /* - * Copy the current shadow region into a new pgdir. + * Return the descriptor index of 'addr' in the root level table */ -void __init kasan_copy_shadow(pgd_t *pgdir) +static int __init root_level_idx(u64 addr) { - pgd_t *pgdp, *pgdp_new, *pgdp_end; + /* + * On 64k pages, the TTBR1 range root tables are extended for 52-bit + * virtual addressing, and TTBR1 will simply point to the pgd_t entry + * that covers the start of the 48-bit addressable VA space if LVA is + * not implemented. This means we need to index the table as usual, + * instead of masking off bits based on vabits_actual. + */ + u64 vabits = IS_ENABLED(CONFIG_ARM64_64K_PAGES) ? VA_BITS + : vabits_actual; + int shift = (ARM64_HW_PGTABLE_LEVELS(vabits) - 1) * PTDESC_TABLE_SHIFT; - pgdp = pgd_offset_k(KASAN_SHADOW_START); - pgdp_end = pgd_offset_k(KASAN_SHADOW_END); - pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START); - do { - set_pgd(pgdp_new, READ_ONCE(*pgdp)); - } while (pgdp++, pgdp_new++, pgdp != pgdp_end); + return (addr & ~_PAGE_OFFSET(vabits)) >> (shift + PAGE_SHIFT); } -static void __init clear_pgds(unsigned long start, - unsigned long end) +/* + * Clone a next level table from swapper_pg_dir into tmp_pg_dir + */ +static void __init clone_next_level(u64 addr, pgd_t *tmp_pg_dir, pud_t *pud) { - /* - * Remove references to kasan page tables from - * swapper_pg_dir. 
pgd_clear() can't be used - * here because it's nop on 2,3-level pagetable setups - */ - for (; start < end; start += PGDIR_SIZE) - set_pgd(pgd_offset_k(start), __pgd(0)); + int idx = root_level_idx(addr); + pgd_t pgd = READ_ONCE(swapper_pg_dir[idx]); + pud_t *pudp = (pud_t *)__phys_to_kimg(__pgd_to_phys(pgd)); + + memcpy(pud, pudp, PAGE_SIZE); + tmp_pg_dir[idx] = __pgd(__phys_to_pgd_val(__pa_symbol(pud)) | + PUD_TYPE_TABLE); } -void __init kasan_init(void) +/* + * Return the descriptor index of 'addr' in the next level table + */ +static int __init next_level_idx(u64 addr) { + int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 2) * PTDESC_TABLE_SHIFT; + + return (addr >> (shift + PAGE_SHIFT)) % PTRS_PER_PTE; +} + +/* + * Dereference the table descriptor at 'pgd_idx' and clear the entries from + * 'start' to 'end' (exclusive) from the table. + */ +static void __init clear_next_level(int pgd_idx, int start, int end) +{ + pgd_t pgd = READ_ONCE(swapper_pg_dir[pgd_idx]); + pud_t *pudp = (pud_t *)__phys_to_kimg(__pgd_to_phys(pgd)); + + memset(&pudp[start], 0, (end - start) * sizeof(pud_t)); +} + +static void __init clear_shadow(u64 start, u64 end) +{ + int l = root_level_idx(start), m = root_level_idx(end); + + if (!root_level_aligned(start)) + clear_next_level(l++, next_level_idx(start), PTRS_PER_PTE); + if (!root_level_aligned(end)) + clear_next_level(m, 0, next_level_idx(end)); + memset(&swapper_pg_dir[l], 0, (m - l) * sizeof(pgd_t)); +} + +static void __init kasan_init_shadow(void) +{ + static pud_t pud[2][PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE); u64 kimg_shadow_start, kimg_shadow_end; - u64 mod_shadow_start, mod_shadow_end; - struct memblock_region *reg; - int i; + u64 mod_shadow_start; + u64 vmalloc_shadow_end; + phys_addr_t pa_start, pa_end; + u64 i; - kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK; - kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end)); + kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK; + kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END)); mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR); - mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END); + + vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END); /* * We are going to perform proper setup of shadow memory. @@ -218,26 +321,37 @@ void __init kasan_init(void) * setup will be finished. */ memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir)); + + /* + * If the start or end address of the shadow region is not aligned to + * the root level size, we have to allocate a temporary next-level table + * in each case, clone the next level of descriptors, and install the + * table into tmp_pg_dir. Note that with 5 levels of paging, the next + * level will in fact be p4d_t, but that makes no difference in this + * case. 
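+ * + * To make the arithmetic concrete (illustrative only): with 4K pages + * and 48-bit VAs there are four levels, so root_level_aligned() tests + * alignment to PAGE_SIZE << (3 * PTDESC_TABLE_SHIFT), i.e. 512G + * (PGDIR_SIZE), and only shadow bounds that miss that alignment take + * the cloning path below.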
+ */ + if (!root_level_aligned(KASAN_SHADOW_START)) + clone_next_level(KASAN_SHADOW_START, tmp_pg_dir, pud[0]); + if (!root_level_aligned(KASAN_SHADOW_END)) + clone_next_level(KASAN_SHADOW_END, tmp_pg_dir, pud[1]); dsb(ishst); cpu_replace_ttbr1(lm_alias(tmp_pg_dir)); - clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); + clear_shadow(KASAN_SHADOW_START, KASAN_SHADOW_END); kasan_map_populate(kimg_shadow_start, kimg_shadow_end, - early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); + early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START)))); kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END), (void *)mod_shadow_start); - kasan_populate_early_shadow((void *)kimg_shadow_end, - (void *)KASAN_SHADOW_END); - if (kimg_shadow_start > mod_shadow_end) - kasan_populate_early_shadow((void *)mod_shadow_end, - (void *)kimg_shadow_start); + BUILD_BUG_ON(VMALLOC_START != MODULES_END); + kasan_populate_early_shadow((void *)vmalloc_shadow_end, + (void *)KASAN_SHADOW_END); - for_each_memblock(memory, reg) { - void *start = (void *)__phys_to_virt(reg->base); - void *end = (void *)__phys_to_virt(reg->base + reg->size); + for_each_mem_range(i, &pa_start, &pa_end) { + void *start = (void *)__phys_to_virt(pa_start); + void *end = (void *)__phys_to_virt(pa_end); if (start >= end) break; @@ -252,14 +366,47 @@ void __init kasan_init(void) * so we should make sure that it maps the zero page read-only. */ for (i = 0; i < PTRS_PER_PTE; i++) - set_pte(&kasan_early_shadow_pte[i], + __set_pte(&kasan_early_shadow_pte[i], pfn_pte(sym_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO)); memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); +} - /* At this point kasan is fully initialized. Enable error messages */ +static void __init kasan_init_depth(void) +{ init_task.kasan_depth = 0; - pr_info("KernelAddressSanitizer initialized\n"); } + +#ifdef CONFIG_KASAN_VMALLOC +void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size) +{ + unsigned long shadow_start, shadow_end; + + if (!is_vmalloc_or_module_addr(start)) + return; + + shadow_start = (unsigned long)kasan_mem_to_shadow(start); + shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE); + shadow_end = (unsigned long)kasan_mem_to_shadow(start + size); + shadow_end = ALIGN(shadow_end, PAGE_SIZE); + kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE); +} +#endif + +void __init kasan_init(void) +{ + kasan_init_shadow(); + kasan_init_depth(); +#if defined(CONFIG_KASAN_GENERIC) + /* + * Generic KASAN is now fully initialized. + * Software and Hardware Tag-Based modes still require + * kasan_init_sw_tags() and kasan_init_hw_tags() correspondingly. + */ + pr_info("KernelAddressSanitizer initialized (generic)\n"); +#endif +} + +#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ diff --git a/arch/arm64/mm/mem_encrypt.c b/arch/arm64/mm/mem_encrypt.c new file mode 100644 index 000000000000..ee3c0ab04384 --- /dev/null +++ b/arch/arm64/mm/mem_encrypt.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implementation of the memory encryption/decryption API. + * + * Since the low-level details of the operation depend on the + * Confidential Computing environment (e.g. pKVM, CCA, ...), this just + * acts as a top-level dispatcher to whatever hooks may have been + * registered. + * + * Author: Will Deacon <will@kernel.org> + * Copyright (C) 2024 Google LLC + * + * "Hello, boils and ghouls!" 
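+ * + * A backend registers its hooks once at boot; a sketch with + * hypothetical callback names (each takes (unsigned long addr, + * int numpages) and returns int): + * + *	static const struct arm64_mem_crypt_ops my_ops = { + *		.encrypt = my_encrypt, + *		.decrypt = my_decrypt, + *	}; + *	arm64_mem_crypt_ops_register(&my_ops);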
+ */ + +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/err.h> +#include <linux/mm.h> + +#include <asm/mem_encrypt.h> + +static const struct arm64_mem_crypt_ops *crypt_ops; + +int arm64_mem_crypt_ops_register(const struct arm64_mem_crypt_ops *ops) +{ + if (WARN_ON(crypt_ops)) + return -EBUSY; + + crypt_ops = ops; + return 0; +} + +int set_memory_encrypted(unsigned long addr, int numpages) +{ + if (likely(!crypt_ops) || WARN_ON(!PAGE_ALIGNED(addr))) + return 0; + + return crypt_ops->encrypt(addr, numpages); +} +EXPORT_SYMBOL_GPL(set_memory_encrypted); + +int set_memory_decrypted(unsigned long addr, int numpages) +{ + if (likely(!crypt_ops) || WARN_ON(!PAGE_ALIGNED(addr))) + return 0; + + return crypt_ops->decrypt(addr, numpages); +} +EXPORT_SYMBOL_GPL(set_memory_decrypted); diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 3028bacbc4e9..c86c348857c4 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -5,20 +5,34 @@ * Copyright (C) 2012 ARM Ltd. */ -#include <linux/elf.h> -#include <linux/fs.h> +#include <linux/io.h> #include <linux/memblock.h> #include <linux/mm.h> -#include <linux/mman.h> -#include <linux/export.h> -#include <linux/shm.h> -#include <linux/sched/signal.h> -#include <linux/sched/mm.h> -#include <linux/io.h> -#include <linux/personality.h> -#include <linux/random.h> +#include <linux/types.h> + +#include <asm/cpufeature.h> +#include <asm/page.h> -#include <asm/cputype.h> +static pgprot_t protection_map[16] __ro_after_init = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_READONLY, + [VM_WRITE | VM_READ] = PAGE_READONLY, + /* PAGE_EXECONLY if Enhanced PAN */ + [VM_EXEC] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READONLY_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + /* PAGE_EXECONLY if Enhanced PAN */ + [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC +}; /* * You really shouldn't be using read() or write() on /dev/mem. This might go @@ -48,23 +62,64 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK); } -#ifdef CONFIG_STRICT_DEVMEM +static int __init adjust_protection_map(void) +{ + /* + * With Enhanced PAN we can honour the execute-only permissions as + * there is no PAN override with such mappings. + */ + if (cpus_have_cap(ARM64_HAS_EPAN)) { + protection_map[VM_EXEC] = PAGE_EXECONLY; + protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY; + } -#include <linux/ioport.h> + if (lpa2_is_enabled()) + for (int i = 0; i < ARRAY_SIZE(protection_map); i++) + pgprot_val(protection_map[i]) &= ~PTE_SHARED; -/* - * devmem_is_allowed() checks to see if /dev/mem access to a certain address - * is valid. The argument is a physical page number. We mimic x86 here by - * disallowing access to system RAM as well as device-exclusive MMIO regions. - * This effectively disable read()/write() on /dev/mem. 
- */ -int devmem_is_allowed(unsigned long pfn) -{ - if (iomem_is_exclusive(pfn << PAGE_SHIFT)) - return 0; - if (!page_is_ram(pfn)) - return 1; return 0; } +arch_initcall(adjust_protection_map); +pgprot_t vm_get_page_prot(unsigned long vm_flags) +{ + ptdesc_t prot; + + /* Short circuit GCS to avoid bloating the table. */ + if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) { + prot = _PAGE_GCS_RO; + } else { + prot = pgprot_val(protection_map[vm_flags & + (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]); + } + + if (vm_flags & VM_ARM64_BTI) + prot |= PTE_GP; + + /* + * There are two conditions required for returning a Normal Tagged + * memory type: (1) the user requested it via PROT_MTE passed to + * mmap() or mprotect() and (2) the corresponding vma supports MTE. We + * register (1) as VM_MTE in the vma->vm_flags and (2) as + * VM_MTE_ALLOWED. Note that the latter can only be set during the + * mmap() call since mprotect() does not accept MAP_* flags. + * Checking for VM_MTE only is sufficient since arch_validate_flags() + * does not permit (VM_MTE & !VM_MTE_ALLOWED). + */ + if (vm_flags & VM_MTE) + prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED); + +#ifdef CONFIG_ARCH_HAS_PKEYS + if (system_supports_poe()) { + if (vm_flags & VM_PKEY_BIT0) + prot |= PTE_PO_IDX_0; + if (vm_flags & VM_PKEY_BIT1) + prot |= PTE_PO_IDX_1; + if (vm_flags & VM_PKEY_BIT2) + prot |= PTE_PO_IDX_2; + } #endif + + return __pgprot(prot); +} +EXPORT_SYMBOL(vm_get_page_prot); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 5a3b15a14a7f..8fcf59ba39db 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -17,10 +17,15 @@ #include <linux/mman.h> #include <linux/nodemask.h> #include <linux/memblock.h> +#include <linux/memremap.h> +#include <linux/memory.h> #include <linux/fs.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/vmalloc.h> +#include <linux/set_memory.h> +#include <linux/kfence.h> +#include <linux/pkeys.h> #include <asm/barrier.h> #include <asm/cputype.h> @@ -34,19 +39,33 @@ #include <asm/mmu_context.h> #include <asm/ptdump.h> #include <asm/tlbflush.h> +#include <asm/pgalloc.h> +#include <asm/kfence.h> #define NO_BLOCK_MAPPINGS BIT(0) #define NO_CONT_MAPPINGS BIT(1) +#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ -u64 idmap_t0sz = TCR_T0SZ(VA_BITS); -u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; - -u64 __section(".mmuoff.data.write") vabits_actual; -EXPORT_SYMBOL(vabits_actual); +enum pgtable_type { + TABLE_PTE, + TABLE_PMD, + TABLE_PUD, + TABLE_P4D, +}; u64 kimage_voffset __ro_after_init; EXPORT_SYMBOL(kimage_voffset); +u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 }; + +static bool rodata_is_rw __ro_after_init = true; + +/* + * The booting CPU updates the failed status @__early_cpu_boot_status, + * with MMU turned off. + */ +long __section(".mmuoff.data.write") __early_cpu_boot_status; + /* * Empty_zero_page is a special page that is used for zero-initialized data * and COW. 
@@ -54,16 +73,24 @@ EXPORT_SYMBOL(kimage_voffset); unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); -static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; -static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused; -static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused; - static DEFINE_SPINLOCK(swapper_pgdir_lock); +static DEFINE_MUTEX(fixmap_lock); -void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) +void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) { pgd_t *fixmap_pgdp; + /* + * Don't bother with the fixmap if swapper_pg_dir is still mapped + * writable in the kernel mapping. + */ + if (rodata_is_rw) { + WRITE_ONCE(*pgdp, pgd); + dsb(ishst); + isb(); + return; + } + spin_lock(&swapper_pgdir_lock); fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp)); WRITE_ONCE(*fixmap_pgdp, pgd); @@ -79,7 +106,7 @@ void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { - if (!pfn_valid(pfn)) + if (!pfn_is_map_memory(pfn)) return pgprot_noncached(vma_prot); else if (file->f_flags & O_SYNC) return pgprot_writecombine(vma_prot); @@ -87,45 +114,35 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, } EXPORT_SYMBOL(phys_mem_access_prot); -static phys_addr_t __init early_pgtable_alloc(int shift) +static phys_addr_t __init early_pgtable_alloc(enum pgtable_type pgtable_type) { phys_addr_t phys; - void *ptr; - phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, + MEMBLOCK_ALLOC_NOLEAKTRACE); if (!phys) panic("Failed to allocate page table page\n"); - /* - * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE - * slot will be free, so we can (ab)use the FIX_PTE slot to initialise - * any level of table. - */ - ptr = pte_set_fixmap(phys); - - memset(ptr, 0, PAGE_SIZE); - - /* - * Implicit barriers also ensure the zeroed page is visible to the page - * table walker - */ - pte_clear_fixmap(); - return phys; } -static bool pgattr_change_is_safe(u64 old, u64 new) +bool pgattr_change_is_safe(pteval_t old, pteval_t new) { /* * The following mapping attributes may be updated in live * kernel mappings without the need for break-before-make. */ - static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG; + pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG | + PTE_SWBITS_MASK; /* creating or taking down mappings is always safe */ - if (old == 0 || new == 0) + if (!pte_valid(__pte(old)) || !pte_valid(__pte(new))) return true; + /* A live entry's pfn should not change */ + if (pte_pfn(__pte(old)) != pte_pfn(__pte(new))) + return false; + /* live contiguous mappings may not be manipulated at all */ if ((old | new) & PTE_CONT) return false; @@ -134,51 +151,78 @@ static bool pgattr_change_is_safe(u64 old, u64 new) if (old & ~new & PTE_NG) return false; + /* + * Changing the memory type between Normal and Normal-Tagged is safe + * since Tagged is considered a permission attribute from the + * mismatched attribute aliases perspective. 
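+ * + * As a concrete illustration (editorial, not in the original comment): + * with this logic, clearing PTE_RDONLY or setting PTE_PXN on a live + * entry passes the check, while changing the output address (the pfn) + * or touching a PTE_CONT mapping is rejected and would require + * break-before-make.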
+ */ + if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) || + (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) && + ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) || + (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED))) + mask |= PTE_ATTRINDX_MASK; + return ((old ^ new) & ~mask) == 0; } -static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, - phys_addr_t phys, pgprot_t prot) +static void init_clear_pgtable(void *table) { - pte_t *ptep; + clear_page(table); - ptep = pte_set_fixmap_offset(pmdp, addr); + /* Ensure the zeroing is observed by page table walks. */ + dsb(ishst); +} + +static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot) +{ do { - pte_t old_pte = READ_ONCE(*ptep); + pte_t old_pte = __ptep_get(ptep); - set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot)); + /* + * Required barriers to make this visible to the table walker + * are deferred to the end of alloc_init_cont_pte(). + */ + __set_pte_nosync(ptep, pfn_pte(__phys_to_pfn(phys), prot)); /* * After the PTE entry has been populated once, we * only allow updates to the permission attributes. */ BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), - READ_ONCE(pte_val(*ptep)))); + pte_val(__ptep_get(ptep)))); phys += PAGE_SIZE; } while (ptep++, addr += PAGE_SIZE, addr != end); - - pte_clear_fixmap(); } static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, - phys_addr_t (*pgtable_alloc)(int), + phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags) { unsigned long next; pmd_t pmd = READ_ONCE(*pmdp); + pte_t *ptep; BUG_ON(pmd_sect(pmd)); if (pmd_none(pmd)) { + pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN | PMD_TABLE_AF; phys_addr_t pte_phys; + + if (flags & NO_EXEC_MAPPINGS) + pmdval |= PMD_TABLE_PXN; BUG_ON(!pgtable_alloc); - pte_phys = pgtable_alloc(PAGE_SHIFT); - __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE); - pmd = READ_ONCE(*pmdp); + pte_phys = pgtable_alloc(TABLE_PTE); + ptep = pte_set_fixmap(pte_phys); + init_clear_pgtable(ptep); + ptep += pte_index(addr); + __pmd_populate(pmdp, pte_phys, pmdval); + } else { + BUG_ON(pmd_bad(pmd)); + ptep = pte_set_fixmap_offset(pmdp, addr); } - BUG_ON(pmd_bad(pmd)); do { pgprot_t __prot = prot; @@ -190,27 +234,33 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, (flags & NO_CONT_MAPPINGS) == 0) __prot = __pgprot(pgprot_val(prot) | PTE_CONT); - init_pte(pmdp, addr, next, phys, __prot); + init_pte(ptep, addr, next, phys, __prot); + ptep += pte_index(next) - pte_index(addr); phys += next - addr; } while (addr = next, addr != end); + + /* + * Note: barriers and maintenance necessary to clear the fixmap slot + * ensure that all previous pgtable writes are visible to the table + * walker. 
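+ * + * (An assumption about the helper, stated for clarity: pte_clear_fixmap() + * ends in the dsb/TLB maintenance required to retarget the fixmap slot, + * and that same maintenance orders the preceding __set_pte_nosync() + * stores for the hardware walker.)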
+ */ + pte_clear_fixmap(); } -static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, +static void init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, - phys_addr_t (*pgtable_alloc)(int), int flags) + phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags) { unsigned long next; - pmd_t *pmdp; - pmdp = pmd_set_fixmap_offset(pudp, addr); do { pmd_t old_pmd = READ_ONCE(*pmdp); next = pmd_addr_end(addr, end); /* try section mapping first */ - if (((addr | next | phys) & ~SECTION_MASK) == 0 && + if (((addr | next | phys) & ~PMD_MASK) == 0 && (flags & NO_BLOCK_MAPPINGS) == 0) { pmd_set_huge(pmdp, phys, prot); @@ -229,30 +279,38 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, } phys += next - addr; } while (pmdp++, addr = next, addr != end); - - pmd_clear_fixmap(); } static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, - phys_addr_t (*pgtable_alloc)(int), int flags) + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) { unsigned long next; pud_t pud = READ_ONCE(*pudp); + pmd_t *pmdp; /* * Check for initial section mappings in the pgd/pud. */ BUG_ON(pud_sect(pud)); if (pud_none(pud)) { + pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN | PUD_TABLE_AF; phys_addr_t pmd_phys; + + if (flags & NO_EXEC_MAPPINGS) + pudval |= PUD_TABLE_PXN; BUG_ON(!pgtable_alloc); - pmd_phys = pgtable_alloc(PMD_SHIFT); - __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE); - pud = READ_ONCE(*pudp); + pmd_phys = pgtable_alloc(TABLE_PMD); + pmdp = pmd_set_fixmap(pmd_phys); + init_clear_pgtable(pmdp); + pmdp += pmd_index(addr); + __pud_populate(pudp, pmd_phys, pudval); + } else { + BUG_ON(pud_bad(pud)); + pmdp = pmd_set_fixmap_offset(pudp, addr); } - BUG_ON(pud_bad(pud)); do { pgprot_t __prot = prot; @@ -264,43 +322,41 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, (flags & NO_CONT_MAPPINGS) == 0) __prot = __pgprot(pgprot_val(prot) | PTE_CONT); - init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags); + init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc, flags); + pmdp += pmd_index(next) - pmd_index(addr); phys += next - addr; } while (addr = next, addr != end); -} - -static inline bool use_1G_block(unsigned long addr, unsigned long next, - unsigned long phys) -{ - if (PAGE_SHIFT != 12) - return false; - - if (((addr | next | phys) & ~PUD_MASK) != 0) - return false; - return true; + pmd_clear_fixmap(); } -static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, +static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, - phys_addr_t (*pgtable_alloc)(int), + phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags) { unsigned long next; + p4d_t p4d = READ_ONCE(*p4dp); pud_t *pudp; - pgd_t pgd = READ_ONCE(*pgdp); - if (pgd_none(pgd)) { + if (p4d_none(p4d)) { + p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN | P4D_TABLE_AF; phys_addr_t pud_phys; + + if (flags & NO_EXEC_MAPPINGS) + p4dval |= P4D_TABLE_PXN; BUG_ON(!pgtable_alloc); - pud_phys = pgtable_alloc(PUD_SHIFT); - __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE); - pgd = READ_ONCE(*pgdp); + pud_phys = pgtable_alloc(TABLE_PUD); + pudp = pud_set_fixmap(pud_phys); + init_clear_pgtable(pudp); + pudp += pud_index(addr); + __p4d_populate(p4dp, pud_phys, p4dval); + } else { + BUG_ON(p4d_bad(p4d)); + pudp = pud_set_fixmap_offset(p4dp, addr); } - BUG_ON(pgd_bad(pgd)); - pudp = pud_set_fixmap_offset(pgdp, addr); do { 
pud_t old_pud = READ_ONCE(*pudp); @@ -309,7 +365,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, /* * For 4K granule only, attempt to put down a 1GB block */ - if (use_1G_block(addr, next, phys) && + if (pud_sect_supported() && + ((addr | next | phys) & ~PUD_MASK) == 0 && (flags & NO_BLOCK_MAPPINGS) == 0) { pud_set_huge(pudp, phys, prot); @@ -332,14 +389,57 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, pud_clear_fixmap(); } -static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, - unsigned long virt, phys_addr_t size, - pgprot_t prot, - phys_addr_t (*pgtable_alloc)(int), - int flags) +static void alloc_init_p4d(pgd_t *pgdp, unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) +{ + unsigned long next; + pgd_t pgd = READ_ONCE(*pgdp); + p4d_t *p4dp; + + if (pgd_none(pgd)) { + pgdval_t pgdval = PGD_TYPE_TABLE | PGD_TABLE_UXN | PGD_TABLE_AF; + phys_addr_t p4d_phys; + + if (flags & NO_EXEC_MAPPINGS) + pgdval |= PGD_TABLE_PXN; + BUG_ON(!pgtable_alloc); + p4d_phys = pgtable_alloc(TABLE_P4D); + p4dp = p4d_set_fixmap(p4d_phys); + init_clear_pgtable(p4dp); + p4dp += p4d_index(addr); + __pgd_populate(pgdp, p4d_phys, pgdval); + } else { + BUG_ON(pgd_bad(pgd)); + p4dp = p4d_set_fixmap_offset(pgdp, addr); + } + + do { + p4d_t old_p4d = READ_ONCE(*p4dp); + + next = p4d_addr_end(addr, end); + + alloc_init_pud(p4dp, addr, next, phys, prot, + pgtable_alloc, flags); + + BUG_ON(p4d_val(old_p4d) != 0 && + p4d_val(old_p4d) != READ_ONCE(p4d_val(*p4dp))); + + phys += next - addr; + } while (p4dp++, addr = next, addr != end); + + p4d_clear_fixmap(); +} + +static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) { unsigned long addr, end, next; - pgd_t *pgdp = pgd_offset_raw(pgdir, virt); + pgd_t *pgdp = pgd_offset_pgd(pgdir, virt); /* * If the virtual and physical address don't have the same offset @@ -354,51 +454,81 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, do { next = pgd_addr_end(addr, end); - alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc, + alloc_init_p4d(pgdp, addr, next, phys, prot, pgtable_alloc, flags); phys += next - addr; } while (pgdp++, addr = next, addr != end); } -static phys_addr_t __pgd_pgtable_alloc(int shift) +static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags) { - void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL); - BUG_ON(!ptr); - - /* Ensure the zeroed page is visible to the page table walker */ - dsb(ishst); - return __pa(ptr); + mutex_lock(&fixmap_lock); + __create_pgd_mapping_locked(pgdir, phys, virt, size, prot, + pgtable_alloc, flags); + mutex_unlock(&fixmap_lock); } -static phys_addr_t pgd_pgtable_alloc(int shift) -{ - phys_addr_t pa = __pgd_pgtable_alloc(shift); +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +extern __alias(__create_pgd_mapping_locked) +void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt, + phys_addr_t size, pgprot_t prot, + phys_addr_t (*pgtable_alloc)(enum pgtable_type), + int flags); +#endif - /* - * Call proper page table ctor in case later we need to - * call core mm functions like apply_to_page_range() on - * this pre-allocated page table. 
- * - * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is - * folded, and if so pgtable_pmd_page_ctor() becomes nop. - */ - if (shift == PAGE_SHIFT) - BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa))); - else if (shift == PMD_SHIFT) - BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa))); +static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, + enum pgtable_type pgtable_type) +{ + /* Page is zeroed by init_clear_pgtable() so don't duplicate effort. */ + struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & ~__GFP_ZERO, 0); + phys_addr_t pa; + + BUG_ON(!ptdesc); + pa = page_to_phys(ptdesc_page(ptdesc)); + + switch (pgtable_type) { + case TABLE_PTE: + BUG_ON(!pagetable_pte_ctor(mm, ptdesc)); + break; + case TABLE_PMD: + BUG_ON(!pagetable_pmd_ctor(mm, ptdesc)); + break; + case TABLE_PUD: + pagetable_pud_ctor(ptdesc); + break; + case TABLE_P4D: + pagetable_p4d_ctor(ptdesc); + break; + } return pa; } +static phys_addr_t __maybe_unused +pgd_pgtable_alloc_init_mm(enum pgtable_type pgtable_type) +{ + return __pgd_pgtable_alloc(&init_mm, pgtable_type); +} + +static phys_addr_t +pgd_pgtable_alloc_special_mm(enum pgtable_type pgtable_type) +{ + return __pgd_pgtable_alloc(NULL, pgtable_type); +} + /* * This function can only be used to modify existing table entries, * without allocating new levels of table. Note that this permits the * creation of new section or page entries. */ -static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, - phys_addr_t size, pgprot_t prot) +void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, + phys_addr_t size, pgprot_t prot) { - if ((virt >= PAGE_END) && (virt < VMALLOC_START)) { + if (virt < PAGE_OFFSET) { pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", &phys, virt); return; @@ -419,13 +549,13 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; __create_pgd_mapping(mm->pgd, phys, virt, size, prot, - pgd_pgtable_alloc, flags); + pgd_pgtable_alloc_special_mm, flags); } static void update_mapping_prot(phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot) { - if ((virt >= PAGE_END) && (virt < VMALLOC_START)) { + if (virt < PAGE_OFFSET) { pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n", &phys, virt); return; @@ -450,20 +580,92 @@ void __init mark_linear_text_alias_ro(void) /* * Remove the write permissions from the linear alias of .text/.rodata */ - update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text), - (unsigned long)__init_begin - (unsigned long)_text, + update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext), + (unsigned long)__init_begin - (unsigned long)_stext, PAGE_KERNEL_RO); } +#ifdef CONFIG_KFENCE + +bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL; + +/* early_param() will be parsed before map_mem() below. */ +static int __init parse_kfence_early_init(char *arg) +{ + int val; + + if (get_option(&arg, &val)) + kfence_early_init = !!val; + return 0; +} +early_param("kfence.sample_interval", parse_kfence_early_init); + +static phys_addr_t __init arm64_kfence_alloc_pool(void) +{ + phys_addr_t kfence_pool; + + if (!kfence_early_init) + return 0; + + kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); + if (!kfence_pool) { + pr_err("failed to allocate kfence pool\n"); + kfence_early_init = false; + return 0; + } + + /* Temporarily mark as NOMAP. 
*/ + memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); + + return kfence_pool; +} + +static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) +{ + if (!kfence_pool) + return; + + /* KFENCE pool needs page-level mapping. */ + __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE, + pgprot_tagged(PAGE_KERNEL), + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); + memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); + __kfence_pool = phys_to_virt(kfence_pool); +} +#else /* CONFIG_KFENCE */ + +static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; } +static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { } + +#endif /* CONFIG_KFENCE */ + static void __init map_mem(pgd_t *pgdp) { - phys_addr_t kernel_start = __pa_symbol(_text); + static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN); + phys_addr_t kernel_start = __pa_symbol(_stext); phys_addr_t kernel_end = __pa_symbol(__init_begin); - struct memblock_region *reg; - int flags = 0; + phys_addr_t start, end; + phys_addr_t early_kfence_pool; + int flags = NO_EXEC_MAPPINGS; + u64 i; - if (rodata_full || debug_pagealloc_enabled()) - flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + /* + * Setting hierarchical PXNTable attributes on table entries covering + * the linear region is only possible if it is guaranteed that no table + * entries at any level are being shared between the linear region and + * the vmalloc region. Check whether this is true for the PGD level, in + * which case it is guaranteed to be true for all other levels as well. + * (Unless we are running with support for LPA2, in which case the + * entire reduced VA space is covered by a single pgd_t which will have + * been populated without the PXNTable attribute by the time we get here.) + */ + BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) && + pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1); + + early_kfence_pool = arm64_kfence_alloc_pool(); + + if (can_set_direct_map()) + flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; /* * Take care not to create a writable alias for the @@ -472,27 +674,22 @@ static void __init map_mem(pgd_t *pgdp) * the following for-loop */ memblock_mark_nomap(kernel_start, kernel_end - kernel_start); -#ifdef CONFIG_KEXEC_CORE - if (crashk_res.end) - memblock_mark_nomap(crashk_res.start, - resource_size(&crashk_res)); -#endif /* map all the memory banks */ - for_each_memblock(memory, reg) { - phys_addr_t start = reg->base; - phys_addr_t end = start + reg->size; - + for_each_mem_range(i, &start, &end) { if (start >= end) break; - if (memblock_is_nomap(reg)) - continue; - - __map_memblock(pgdp, start, end, PAGE_KERNEL, flags); + /* + * The linear map must allow allocation tags reading/writing + * if MTE is present. Otherwise, it has the same attributes as + * PAGE_KERNEL. + */ + __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL), + flags); } /* - * Map the linear alias of the [_text, __init_begin) interval + * Map the linear alias of the [_stext, __init_begin) interval * as non-executable now, and remove the write permission in * mark_linear_text_alias_ro() below (which will be called after * alternative patching has completed). 
This makes the contents @@ -504,21 +701,7 @@ static void __init map_mem(pgd_t *pgdp) __map_memblock(pgdp, kernel_start, kernel_end, PAGE_KERNEL, NO_CONT_MAPPINGS); memblock_clear_nomap(kernel_start, kernel_end - kernel_start); - -#ifdef CONFIG_KEXEC_CORE - /* - * Use page-level mappings here so that we can shrink the region - * in page granularity and put back unused memory to buddy system - * through /sys/kernel/kexec_crash_size interface. - */ - if (crashk_res.end) { - __map_memblock(pgdp, crashk_res.start, crashk_res.end + 1, - PAGE_KERNEL, - NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); - memblock_clear_nomap(crashk_res.start, - resource_size(&crashk_res)); - } -#endif + arm64_kfence_map_pool(early_kfence_pool, pgdp); } void mark_rodata_ro(void) @@ -530,15 +713,14 @@ void mark_rodata_ro(void) * to cover NOTES and EXCEPTION_TABLE. */ section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata; + WRITE_ONCE(rodata_is_rw, false); update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata, section_size, PAGE_KERNEL_RO); - - debug_checkwx(); } -static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end, - pgprot_t prot, struct vm_struct *vma, - int flags, unsigned long vm_flags) +static void __init declare_vma(struct vm_struct *vma, + void *va_start, void *va_end, + unsigned long vm_flags) { phys_addr_t pa_start = __pa_symbol(va_start); unsigned long size = va_end - va_start; @@ -546,9 +728,6 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end, BUG_ON(!PAGE_ALIGNED(pa_start)); BUG_ON(!PAGE_ALIGNED(size)); - __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot, - early_pgtable_alloc, flags); - if (!(vm_flags & VM_NO_GUARD)) size += PAGE_SIZE; @@ -561,28 +740,20 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end, vm_area_add_early(vma); } -static int __init parse_rodata(char *arg) +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static pgprot_t kernel_exec_prot(void) { - int ret = strtobool(arg, &rodata_enabled); - if (!ret) { - rodata_full = false; - return 0; - } - - /* permit 'full' in addition to boolean options */ - if (strcmp(arg, "full")) - return -EINVAL; - - rodata_enabled = true; - rodata_full = true; - return 0; + return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; } -early_param("rodata", parse_rodata); -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 static int __init map_entry_trampoline(void) { - pgprot_t prot = rodata_enabled ? 
PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; + int i; + + if (!arm64_kernel_unmapped_at_el0()) + return 0; + + pgprot_t prot = kernel_exec_prot(); phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); /* The trampoline is always mapped and can therefore be global */ @@ -590,18 +761,18 @@ static int __init map_entry_trampoline(void) /* Map only the text into the trampoline page table */ memset(tramp_pg_dir, 0, PGD_SIZE); - __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, - prot, __pgd_pgtable_alloc, 0); + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, + entry_tramp_text_size(), prot, + pgd_pgtable_alloc_init_mm, NO_BLOCK_MAPPINGS); /* Map both the text and data into the kernel page table */ - __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { - extern char __entry_tramp_data_start[]; + for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++) + __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, + pa_start + i * PAGE_SIZE, prot); - __set_fixmap(FIX_ENTRY_TRAMP_DATA, - __pa_symbol(__entry_tramp_data_start), - PAGE_KERNEL_RO); - } + if (IS_ENABLED(CONFIG_RELOCATABLE)) + __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, + pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO); return 0; } @@ -609,348 +780,442 @@ core_initcall(map_entry_trampoline); #endif /* - * Create fine-grained mappings for the kernel. + * Declare the VMA areas for the kernel */ -static void __init map_kernel(pgd_t *pgdp) +static void __init declare_kernel_vmas(void) { - static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext, - vmlinux_initdata, vmlinux_data; + static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT]; - /* - * External debuggers may need to write directly to the text - * mapping to install SW breakpoints. Allow this (only) when - * explicitly requested with rodata=off. - */ - pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; + declare_vma(&vmlinux_seg[0], _stext, _etext, VM_NO_GUARD); + declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD); + declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD); + declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD); + declare_vma(&vmlinux_seg[4], _data, _end, 0); +} - /* - * Only rodata will be remapped with different permissions later on, - * all other segments are allowed to use contiguous mappings. - */ - map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0, - VM_NO_GUARD); - map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL, - &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD); - map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot, - &vmlinux_inittext, 0, VM_NO_GUARD); - map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL, - &vmlinux_initdata, 0, VM_NO_GUARD); - map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0); - - if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) { - /* - * The fixmap falls in a separate pgd to the kernel, and doesn't - * live in the carveout for the swapper_pg_dir. We can simply - * re-use the existing dir for the fixmap. - */ - set_pgd(pgd_offset_raw(pgdp, FIXADDR_START), - READ_ONCE(*pgd_offset_k(FIXADDR_START))); - } else if (CONFIG_PGTABLE_LEVELS > 3) { - pgd_t *bm_pgdp; - pud_t *bm_pudp; - /* - * The fixmap shares its top level pgd entry with the kernel - * mapping. 
This can really only occur when we are running - * with 16k/4 levels, so we can simply reuse the pud level - * entry instead. - */ - BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); - bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START); - bm_pudp = pud_set_fixmap_offset(bm_pgdp, FIXADDR_START); - pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd)); - pud_clear_fixmap(); - } else { - BUG(); - } +void __pi_map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot, + int level, pte_t *tbl, bool may_use_cont, u64 va_offset); - kasan_copy_shadow(pgdp); -} +static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init, + kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init; -void __init paging_init(void) +static void __init create_idmap(void) { - pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir)); + u64 start = __pa_symbol(__idmap_text_start); + u64 end = __pa_symbol(__idmap_text_end); + u64 ptep = __pa_symbol(idmap_ptes); - map_kernel(pgdp); - map_mem(pgdp); + __pi_map_range(&ptep, start, end, start, PAGE_KERNEL_ROX, + IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false, + __phys_to_virt(ptep) - ptep); - pgd_clear_fixmap(); + if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && !arm64_use_ng_mappings) { + extern u32 __idmap_kpti_flag; + u64 pa = __pa_symbol(&__idmap_kpti_flag); - cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); - init_mm.pgd = swapper_pg_dir; + /* + * The KPTI G-to-nG conversion code needs a read-write mapping + * of its synchronization flag in the ID map. + */ + ptep = __pa_symbol(kpti_ptes); + __pi_map_range(&ptep, pa, pa + sizeof(u32), pa, PAGE_KERNEL, + IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false, + __phys_to_virt(ptep) - ptep); + } +} - memblock_free(__pa_symbol(init_pg_dir), - __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir)); +void __init paging_init(void) +{ + map_mem(swapper_pg_dir); memblock_allow_resize(); + + create_idmap(); + declare_kernel_vmas(); } -/* - * Check whether a kernel address is valid (derived from arch/x86/). 
- */ -int kern_addr_valid(unsigned long addr) +#ifdef CONFIG_MEMORY_HOTPLUG +static void free_hotplug_page_range(struct page *page, size_t size, + struct vmem_altmap *altmap) { - pgd_t *pgdp; - pud_t *pudp, pud; - pmd_t *pmdp, pmd; - pte_t *ptep, pte; - - if ((((long)addr) >> VA_BITS) != -1UL) - return 0; + if (altmap) { + vmem_altmap_free(altmap, size >> PAGE_SHIFT); + } else { + WARN_ON(PageReserved(page)); + free_pages((unsigned long)page_address(page), get_order(size)); + } +} - pgdp = pgd_offset_k(addr); - if (pgd_none(READ_ONCE(*pgdp))) - return 0; +static void free_hotplug_pgtable_page(struct page *page) +{ + free_hotplug_page_range(page, PAGE_SIZE, NULL); +} - pudp = pud_offset(pgdp, addr); - pud = READ_ONCE(*pudp); - if (pud_none(pud)) - return 0; +static bool pgtable_range_aligned(unsigned long start, unsigned long end, + unsigned long floor, unsigned long ceiling, + unsigned long mask) +{ + start &= mask; + if (start < floor) + return false; - if (pud_sect(pud)) - return pfn_valid(pud_pfn(pud)); + if (ceiling) { + ceiling &= mask; + if (!ceiling) + return false; + } - pmdp = pmd_offset(pudp, addr); - pmd = READ_ONCE(*pmdp); - if (pmd_none(pmd)) - return 0; + if (end - 1 > ceiling - 1) + return false; + return true; +} - if (pmd_sect(pmd)) - return pfn_valid(pmd_pfn(pmd)); +static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) +{ + pte_t *ptep, pte; - ptep = pte_offset_kernel(pmdp, addr); - pte = READ_ONCE(*ptep); - if (pte_none(pte)) - return 0; + do { + ptep = pte_offset_kernel(pmdp, addr); + pte = __ptep_get(ptep); + if (pte_none(pte)) + continue; - return pfn_valid(pte_pfn(pte)); + WARN_ON(!pte_present(pte)); + __pte_clear(&init_mm, addr, ptep); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + if (free_mapped) + free_hotplug_page_range(pte_page(pte), + PAGE_SIZE, altmap); + } while (addr += PAGE_SIZE, addr < end); } -#ifdef CONFIG_SPARSEMEM_VMEMMAP -#if !ARM64_SWAPPER_USES_SECTION_MAPS -int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, - struct vmem_altmap *altmap) -{ - return vmemmap_populate_basepages(start, end, node); -} -#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */ -int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, - struct vmem_altmap *altmap) + +static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) { - unsigned long addr = start; unsigned long next; - pgd_t *pgdp; - pud_t *pudp; - pmd_t *pmdp; + pmd_t *pmdp, pmd; do { next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + continue; - pgdp = vmemmap_pgd_populate(addr, node); - if (!pgdp) - return -ENOMEM; + WARN_ON(!pmd_present(pmd)); + if (pmd_sect(pmd)) { + pmd_clear(pmdp); - pudp = vmemmap_pud_populate(pgdp, addr, node); - if (!pudp) - return -ENOMEM; + /* + * One TLBI should be sufficient here as the PMD_SIZE + * range is mapped with a single block entry. 
+ */ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + if (free_mapped) + free_hotplug_page_range(pmd_page(pmd), + PMD_SIZE, altmap); + continue; + } + WARN_ON(!pmd_table(pmd)); + unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap); + } while (addr = next, addr < end); +} - pmdp = pmd_offset(pudp, addr); - if (pmd_none(READ_ONCE(*pmdp))) { - void *p = NULL; +static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) +{ + unsigned long next; + pud_t *pudp, pud; - p = vmemmap_alloc_block_buf(PMD_SIZE, node); - if (!p) - return -ENOMEM; + do { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) + continue; - pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); - } else - vmemmap_verify((pte_t *)pmdp, node, addr, next); - } while (addr = next, addr != end); + WARN_ON(!pud_present(pud)); + if (pud_sect(pud)) { + pud_clear(pudp); - return 0; -} -#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */ -void vmemmap_free(unsigned long start, unsigned long end, - struct vmem_altmap *altmap) -{ + /* + * One TLBI should be sufficient here as the PUD_SIZE + * range is mapped with a single block entry. + */ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + if (free_mapped) + free_hotplug_page_range(pud_page(pud), + PUD_SIZE, altmap); + continue; + } + WARN_ON(!pud_table(pud)); + unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap); + } while (addr = next, addr < end); } -#endif /* CONFIG_SPARSEMEM_VMEMMAP */ -static inline pud_t * fixmap_pud(unsigned long addr) +static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) { - pgd_t *pgdp = pgd_offset_k(addr); - pgd_t pgd = READ_ONCE(*pgdp); + unsigned long next; + p4d_t *p4dp, p4d; - BUG_ON(pgd_none(pgd) || pgd_bad(pgd)); + do { + next = p4d_addr_end(addr, end); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + if (p4d_none(p4d)) + continue; - return pud_offset_kimg(pgdp, addr); + WARN_ON(!p4d_present(p4d)); + unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap); + } while (addr = next, addr < end); } -static inline pmd_t * fixmap_pmd(unsigned long addr) +static void unmap_hotplug_range(unsigned long addr, unsigned long end, + bool free_mapped, struct vmem_altmap *altmap) { - pud_t *pudp = fixmap_pud(addr); - pud_t pud = READ_ONCE(*pudp); + unsigned long next; + pgd_t *pgdp, pgd; - BUG_ON(pud_none(pud) || pud_bad(pud)); + /* + * altmap can only be used as vmemmap mapping backing memory. + * In case the backing memory itself is not being freed, then + * altmap is irrelevant. Warn about this inconsistency when + * encountered. + */ + WARN_ON(!free_mapped && altmap); - return pmd_offset_kimg(pudp, addr); -} + do { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + pgd = READ_ONCE(*pgdp); + if (pgd_none(pgd)) + continue; -static inline pte_t * fixmap_pte(unsigned long addr) -{ - return &bm_pte[pte_index(addr)]; + WARN_ON(!pgd_present(pgd)); + unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap); + } while (addr = next, addr < end); } -/* - * The p*d_populate functions call virt_to_phys implicitly so they can't be used - * directly on kernel symbols (bm_p*d). This function is called too early to use - * lm_alias so __p*d_populate functions must be used to populate with the - * physical address from __pa_symbol. 
- */ -void __init early_fixmap_init(void) +static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) { - pgd_t *pgdp, pgd; - pud_t *pudp; - pmd_t *pmdp; - unsigned long addr = FIXADDR_START; + pte_t *ptep, pte; + unsigned long i, start = addr; + + do { + ptep = pte_offset_kernel(pmdp, addr); + pte = __ptep_get(ptep); - pgdp = pgd_offset_k(addr); - pgd = READ_ONCE(*pgdp); - if (CONFIG_PGTABLE_LEVELS > 3 && - !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) { /* - * We only end up here if the kernel mapping and the fixmap - * share the top level pgd entry, which should only happen on - * 16k/4 levels configurations. + * This is just a sanity check which verifies that + * pte clearing has been done by the earlier unmap loops. */ - BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); - pudp = pud_offset_kimg(pgdp, addr); - } else { - if (pgd_none(pgd)) - __pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE); - pudp = fixmap_pud(addr); - } - if (pud_none(READ_ONCE(*pudp))) - __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE); - pmdp = fixmap_pmd(addr); - __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE); + WARN_ON(!pte_none(pte)); + } while (addr += PAGE_SIZE, addr < end); + + if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK)) + return; /* - * The boot-ioremap range spans multiple pmds, for which - * we are not prepared: + * Check whether we can free the pte page if the rest of the + * entries are empty. Overlaps with other regions have been + * handled by the floor/ceiling check. */ - BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) - != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); - - if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) - || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { - WARN_ON(1); - pr_warn("pmdp %p != %p, %p\n", - pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), - fixmap_pmd(fix_to_virt(FIX_BTMAP_END))); - pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", - fix_to_virt(FIX_BTMAP_BEGIN)); - pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", - fix_to_virt(FIX_BTMAP_END)); - - pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END); - pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN); + ptep = pte_offset_kernel(pmdp, 0UL); + for (i = 0; i < PTRS_PER_PTE; i++) { + if (!pte_none(__ptep_get(&ptep[i]))) + return; } + + pmd_clear(pmdp); + __flush_tlb_kernel_pgtable(start); + free_hotplug_pgtable_page(virt_to_page(ptep)); }
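The floor/ceiling check above is the subtle part of the teardown: a PTE table page may only be freed when the naturally aligned region it covers lies entirely inside the [floor, ceiling) bounds handed down by the caller, so a table shared with a neighbouring mapping is never released. A minimal userspace sketch of the predicate, mirroring pgtable_range_aligned() from the hunk above (the 2 MiB PMD mask assumes a 4K granule, and the sample addresses are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors pgtable_range_aligned() from the hunk above. */
static bool range_aligned(uint64_t start, uint64_t end,
                          uint64_t floor, uint64_t ceiling, uint64_t mask)
{
        start &= mask;
        if (start < floor)
                return false;

        if (ceiling) {
                ceiling &= mask;
                if (!ceiling)
                        return false;
        }

        if (end - 1 > ceiling - 1)
                return false;
        return true;
}

int main(void)
{
        const uint64_t pmd_mask = ~((1ULL << 21) - 1);  /* 2 MiB blocks */

        /* Whole PMD region inside [floor, ceiling): the table is freeable. */
        printf("%d\n", range_aligned(0x40200000, 0x40400000,
                                     0x40000000, 0x80000000, pmd_mask));

        /* The covered region straddles the floor: the table must be kept. */
        printf("%d\n", range_aligned(0x40200000, 0x40400000,
                                     0x40280000, 0x80000000, pmd_mask));
        return 0;
}

Compiled with any C99 compiler this prints 1 then 0; the entry-emptiness scan that follows the check in free_empty_pte_table() is a separate, second condition for freeing the page.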
-/* - * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we - * ever need to use IPIs for TLB broadcasting, then we're in trouble here. - */ -void __set_fixmap(enum fixed_addresses idx, - phys_addr_t phys, pgprot_t flags) +static void free_empty_pmd_table(pud_t *pudp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) { - unsigned long addr = __fix_to_virt(idx); - pte_t *ptep; + pmd_t *pmdp, pmd; + unsigned long i, next, start = addr; - BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); + do { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + continue; - ptep = fixmap_pte(addr); + WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd)); + free_empty_pte_table(pmdp, addr, next, floor, ceiling); + } while (addr = next, addr < end); - if (pgprot_val(flags)) { - set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); - } else { - pte_clear(&init_mm, addr, ptep); - flush_tlb_kernel_range(addr, addr+PAGE_SIZE); + if (CONFIG_PGTABLE_LEVELS <= 2) + return; + + if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK)) + return; + + /* + * Check whether we can free the pmd page if the rest of the + * entries are empty. Overlaps with other regions have been + * handled by the floor/ceiling check. + */ + pmdp = pmd_offset(pudp, 0UL); + for (i = 0; i < PTRS_PER_PMD; i++) { + if (!pmd_none(READ_ONCE(pmdp[i]))) + return; } + + pud_clear(pudp); + __flush_tlb_kernel_pgtable(start); + free_hotplug_pgtable_page(virt_to_page(pmdp)); } -void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) +static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) { - const u64 dt_virt_base = __fix_to_virt(FIX_FDT); - int offset; - void *dt_virt; + pud_t *pudp, pud; + unsigned long i, next, start = addr; - /* - * Check whether the physical FDT address is set and meets the minimum - * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be - * at least 8 bytes so that we can always access the magic and size - * fields of the FDT header after mapping the first chunk, double check - * here if that is indeed the case. - */ - BUILD_BUG_ON(MIN_FDT_ALIGN < 8); - if (!dt_phys || dt_phys % MIN_FDT_ALIGN) - return NULL; + do { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) + continue; + + WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud)); + free_empty_pmd_table(pudp, addr, next, floor, ceiling); + } while (addr = next, addr < end); + + if (!pgtable_l4_enabled()) + return; + + if (!pgtable_range_aligned(start, end, floor, ceiling, P4D_MASK)) + return; /* - * Make sure that the FDT region can be mapped without the need to - * allocate additional translation table pages, so that it is safe - * to call create_mapping_noalloc() this early. - * - * On 64k pages, the FDT will be mapped using PTEs, so we need to - * be in the same PMD as the rest of the fixmap. - * On 4k pages, we'll use section mappings for the FDT so we only - * have to be in the same PUD. + * Check whether we can free the pud page if the rest of the + * entries are empty. Overlaps with other regions have been + * handled by the floor/ceiling check. 
*/ + pudp = pud_offset(p4dp, 0UL); + for (i = 0; i < PTRS_PER_PUD; i++) { + if (!pud_none(READ_ONCE(pudp[i]))) + return; + } - BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT != - __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT); + p4d_clear(p4dp); + __flush_tlb_kernel_pgtable(start); + free_hotplug_pgtable_page(virt_to_page(pudp)); +} - offset = dt_phys % SWAPPER_BLOCK_SIZE; - dt_virt = (void *)dt_virt_base + offset; +static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + p4d_t *p4dp, p4d; + unsigned long i, next, start = addr; - /* map the first chunk so we can read the size from the header */ - create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), - dt_virt_base, SWAPPER_BLOCK_SIZE, prot); + do { + next = p4d_addr_end(addr, end); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + if (p4d_none(p4d)) + continue; - if (fdt_magic(dt_virt) != FDT_MAGIC) - return NULL; + WARN_ON(!p4d_present(p4d)); + free_empty_pud_table(p4dp, addr, next, floor, ceiling); + } while (addr = next, addr < end); - *size = fdt_totalsize(dt_virt); - if (*size > MAX_FDT_SIZE) - return NULL; + if (!pgtable_l5_enabled()) + return; + + if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK)) + return; - if (offset + *size > SWAPPER_BLOCK_SIZE) - create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, - round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot); + /* + * Check whether we can free the p4d page if the rest of the + * entries are empty. Overlaps with other regions have been + * handled by the floor/ceiling check. + */ + p4dp = p4d_offset(pgdp, 0UL); + for (i = 0; i < PTRS_PER_P4D; i++) { + if (!p4d_none(READ_ONCE(p4dp[i]))) + return; + } - return dt_virt; + pgd_clear(pgdp); + __flush_tlb_kernel_pgtable(start); + free_hotplug_pgtable_page(virt_to_page(p4dp)); } -int __init arch_ioremap_p4d_supported(void) +static void free_empty_tables(unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) { - return 0; + unsigned long next; + pgd_t *pgdp, pgd; + + do { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + pgd = READ_ONCE(*pgdp); + if (pgd_none(pgd)) + continue; + + WARN_ON(!pgd_present(pgd)); + free_empty_p4d_table(pgdp, addr, next, floor, ceiling); + } while (addr = next, addr < end); +} +#endif + +void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node, + unsigned long addr, unsigned long next) +{ + pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); }
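vmemmap_set_pmd() above installs a section-sized block for the vmemmap, and vmemmap_populate() below falls back to base pages whenever the request covers less than a full section. The arithmetic behind that choice is easy to verify in isolation; a small sketch assuming a 4K granule, the usual 128 MiB section and a 64-byte struct page (illustrative values, the real ones come from the kernel's sparsemem headers):

#include <stdio.h>

int main(void)
{
        const unsigned long page_size = 4096;           /* 4K granule */
        const unsigned long section_bytes = 1UL << 27;  /* 128 MiB section */
        const unsigned long struct_page_size = 64;      /* typical sizeof(struct page) */

        unsigned long pages_per_section = section_bytes / page_size;   /* 32768 */
        unsigned long vmemmap_bytes = pages_per_section * struct_page_size;

        /* 32768 * 64 bytes = 2 MiB: a full section's vmemmap is exactly
         * one PMD-sized block, which is what vmemmap_set_pmd() installs. */
        printf("vmemmap per section: %lu MiB\n", vmemmap_bytes >> 20);
        return 0;
}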
-int __init arch_ioremap_pud_supported(void) +int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, + unsigned long addr, unsigned long next) { - /* - * Only 4k granule supports level 1 block mappings. - * SW table walks can't handle removal of intermediate entries. - */ - return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && - !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS); + vmemmap_verify((pte_t *)pmdp, node, addr, next); + + return pmd_sect(READ_ONCE(*pmdp)); +} + +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap) +{ + WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); + /* [start, end] should be within one section */ + WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page)); + + if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES) || + (end - start < PAGES_PER_SECTION * sizeof(struct page))) + return vmemmap_populate_basepages(start, end, node, altmap); + else + return vmemmap_populate_hugepages(start, end, node, altmap); } -int __init arch_ioremap_pmd_supported(void) +#ifdef CONFIG_MEMORY_HOTPLUG +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap) { - /* See arch_ioremap_pud_supported() */ - return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS); + WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); + + unmap_hotplug_range(start, end, true, altmap); + free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); } +#endif /* CONFIG_MEMORY_HOTPLUG */ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) { @@ -980,6 +1245,12 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot) return 1; } +#ifndef __PAGETABLE_P4D_FOLDED +void p4d_clear_huge(p4d_t *p4dp) +{ +} +#endif + int pud_clear_huge(pud_t *pudp) { if (!pud_sect(READ_ONCE(*pudp))) @@ -1043,43 +1314,305 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr) return 1; } -int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) +#ifdef CONFIG_MEMORY_HOTPLUG +static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) { - return 0; /* Don't attempt a block mapping */ + unsigned long end = start + size; + + WARN_ON(pgdir != init_mm.pgd); + WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END)); + + unmap_hotplug_range(start, end, false, NULL); + free_empty_tables(start, end, PAGE_OFFSET, PAGE_END); +} + +struct range arch_get_mappable_range(void) +{ + struct range mhp_range; + u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual)); + u64 end_linear_pa = __pa(PAGE_END - 1); + + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + /* + * Check for a wrap: with a randomized linear mapping, the + * start physical address can end up bigger than the end + * physical address. In this case set start to zero because + * the [0, end_linear_pa] range must still be able to cover + * all addressable physical addresses. + */ + if (start_linear_pa > end_linear_pa) + start_linear_pa = 0; + } + + WARN_ON(start_linear_pa > end_linear_pa); + + /* + * The linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)], + * accommodating both of its ends but excluding PAGE_END. The max + * physical range that can be mapped inside this linear mapping range + * must also be derived from its end points. 
+ */ + mhp_range.start = start_linear_pa; + mhp_range.end = end_linear_pa; + + return mhp_range; } -#ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size, - struct mhp_restrictions *restrictions) + struct mhp_params *params) { - int flags = 0; + int ret, flags = NO_EXEC_MAPPINGS; - if (rodata_full || debug_pagealloc_enabled()) - flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + VM_BUG_ON(!mhp_range_allowed(start, size, true)); + + if (can_set_direct_map()) + flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), - size, PAGE_KERNEL, __pgd_pgtable_alloc, flags); + size, params->pgprot, pgd_pgtable_alloc_init_mm, + flags); memblock_clear_nomap(start, size); - return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, - restrictions); + ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, + params); + if (ret) + __remove_pgd_mapping(swapper_pg_dir, + __phys_to_virt(start), size); + else { + /* Address of hotplugged memory can be smaller */ + max_pfn = max(max_pfn, PFN_UP(start + size)); + max_low_pfn = max_pfn; + } + + return ret; } -void arch_remove_memory(int nid, u64 start, u64 size, - struct vmem_altmap *altmap) + +void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - struct zone *zone; + + __remove_pages(start_pfn, nr_pages, altmap); + __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size); +} + +/* + * This memory hotplug notifier helps prevent boot memory from being + * inadvertently removed as it blocks pfn range offlining process in + * __offline_pages(). Hence this prevents both offlining as well as + * removal process for boot memory which is initially always online. + * In future if and when boot memory could be removed, this notifier + * should be dropped and free_hotplug_page_range() should handle any + * reserved pages allocated during boot. + */ +static int prevent_bootmem_remove_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct mem_section *ms; + struct memory_notify *arg = data; + unsigned long end_pfn = arg->start_pfn + arg->nr_pages; + unsigned long pfn = arg->start_pfn; + + if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE)) + return NOTIFY_OK; + + for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { + unsigned long start = PFN_PHYS(pfn); + unsigned long end = start + (1UL << PA_SECTION_SHIFT); + + ms = __pfn_to_section(pfn); + if (!early_section(ms)) + continue; + + if (action == MEM_GOING_OFFLINE) { + /* + * Boot memory removal is not supported. Prevent + * it via blocking any attempted offline request + * for the boot memory and just report it. + */ + pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end); + return NOTIFY_BAD; + } else if (action == MEM_OFFLINE) { + /* + * This should have never happened. Boot memory + * offlining should have been prevented by this + * very notifier. Probably some memory removal + * procedure might have changed which would then + * require further debug. + */ + pr_err("Boot memory [%lx %lx] offlined\n", start, end); + + /* + * Core memory hotplug does not process a return + * code from the notifier for MEM_OFFLINE events. + * The error condition has been reported. Return + * from here as if ignored. 
+ */ + return NOTIFY_DONE; + } + } + return NOTIFY_OK; +} + +static struct notifier_block prevent_bootmem_remove_nb = { + .notifier_call = prevent_bootmem_remove_notifier, +}; + +/* + * This ensures that boot memory sections on the platform are online + * from early boot. Memory sections could not be prevented from being + * offlined, unless for some reason they are not online to begin with. + * This helps validate the basic assumption on which the above memory + * event notifier works to prevent boot memory section offlining and + * its possible removal. + */ +static void validate_bootmem_online(void) +{ + phys_addr_t start, end, addr; + struct mem_section *ms; + u64 i; /* - * FIXME: Cleanup page tables (also in arch_add_memory() in case - * adding fails). Until then, this function should only be used - * during memory hotplug (adding memory), not for memory - * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be - * unlocked yet. + * Scanning across all memblock might be expensive + * on some big memory systems. Hence enable this + * validation only with DEBUG_VM. */ - zone = page_zone(pfn_to_page(start_pfn)); - __remove_pages(zone, start_pfn, nr_pages, altmap); + if (!IS_ENABLED(CONFIG_DEBUG_VM)) + return; + + for_each_mem_range(i, &start, &end) { + for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) { + ms = __pfn_to_section(PHYS_PFN(addr)); + + /* + * All memory ranges in the system at this point + * should have been marked as early sections. + */ + WARN_ON(!early_section(ms)); + + /* + * Memory notifier mechanism here to prevent boot + * memory offlining depends on the fact that each + * early section memory on the system is initially + * online. Otherwise a given memory section which + * is already offline will be overlooked and can + * be removed completely. Call out such sections. + */ + if (!online_section(ms)) + pr_err("Boot memory [%llx %llx] is offline, can be removed\n", + addr, addr + (1UL << PA_SECTION_SHIFT)); + } + } +} + +static int __init prevent_bootmem_remove_init(void) +{ + int ret = 0; + + if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) + return ret; + + validate_bootmem_online(); + ret = register_memory_notifier(&prevent_bootmem_remove_nb); + if (ret) + pr_err("%s: Notifier registration failed %d\n", __func__, ret); + + return ret; +} +early_initcall(prevent_bootmem_remove_init); +#endif + +pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) +{ + if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) { + /* + * Break-before-make (BBM) is required for all user space mappings + * when the permission changes from executable to non-executable + * in cases where cpu is affected with errata #2645198. + */ + if (pte_user_exec(ptep_get(ptep))) + return ptep_clear_flush(vma, addr, ptep); + } + return ptep_get_and_clear(vma->vm_mm, addr, ptep); +} + +void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, + pte_t old_pte, pte_t pte) +{ + set_pte_at(vma->vm_mm, addr, ptep, pte); +} + +/* + * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD, + * avoiding the possibility of conflicting TLB entries being allocated. 
+ */ +void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp) +{ + typedef void (ttbr_replace_func)(phys_addr_t); + extern ttbr_replace_func idmap_cpu_replace_ttbr1; + ttbr_replace_func *replace_phys; + unsigned long daif; + + /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */ + phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp)); + + if (cnp) + ttbr1 |= TTBR_CNP_BIT; + + replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1); + + cpu_install_idmap(); + + /* + * We really don't want to take *any* exceptions while TTBR1 is + * in the process of being replaced so mask everything. + */ + daif = local_daif_save(); + replace_phys(ttbr1); + local_daif_restore(daif); + + cpu_uninstall_idmap(); +} + +#ifdef CONFIG_ARCH_HAS_PKEYS +int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val) +{ + u64 new_por; + u64 old_por; + + if (!system_supports_poe()) + return -ENOSPC; + + /* + * This code should only be called with valid 'pkey' + * values originating from in-kernel users. Complain + * if a bad value is observed. + */ + if (WARN_ON_ONCE(pkey >= arch_max_pkey())) + return -EINVAL; + + /* Set the bits we need in POR: */ + new_por = POE_RWX; + if (init_val & PKEY_DISABLE_WRITE) + new_por &= ~POE_W; + if (init_val & PKEY_DISABLE_ACCESS) + new_por &= ~POE_RW; + if (init_val & PKEY_DISABLE_READ) + new_por &= ~POE_R; + if (init_val & PKEY_DISABLE_EXECUTE) + new_por &= ~POE_X; + + /* Shift the bits in to the correct place in POR for pkey: */ + new_por = POR_ELx_PERM_PREP(pkey, new_por); + + /* Get old POR and mask off any old bits in place: */ + old_por = read_sysreg_s(SYS_POR_EL0); + old_por &= ~(POE_MASK << POR_ELx_PERM_SHIFT(pkey)); + + /* Write old part along with new part: */ + write_sysreg_s(old_por | new_por, SYS_POR_EL0); + + return 0; } #endif diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c new file mode 100644 index 000000000000..63e8d72f202a --- /dev/null +++ b/arch/arm64/mm/mteswap.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/pagemap.h> +#include <linux/xarray.h> +#include <linux/slab.h> +#include <linux/swap.h> +#include <linux/swapops.h> +#include <asm/mte.h> + +static DEFINE_XARRAY(mte_pages); + +void *mte_allocate_tag_storage(void) +{ + /* tags granule is 16 bytes, 2 tags stored per byte */ + return kmalloc(MTE_PAGE_TAG_STORAGE, GFP_KERNEL); +} + +void mte_free_tag_storage(char *storage) +{ + kfree(storage); +} + +int mte_save_tags(struct page *page) +{ + void *tag_storage, *ret; + + if (!page_mte_tagged(page)) + return 0; + + tag_storage = mte_allocate_tag_storage(); + if (!tag_storage) + return -ENOMEM; + + mte_save_page_tags(page_address(page), tag_storage); + + /* lookup the swap entry.val from the page */ + ret = xa_store(&mte_pages, page_swap_entry(page).val, tag_storage, + GFP_KERNEL); + if (WARN(xa_is_err(ret), "Failed to store MTE tags")) { + mte_free_tag_storage(tag_storage); + return xa_err(ret); + } else if (ret) { + /* Entry is being replaced, free the old entry */ + mte_free_tag_storage(ret); + } + + return 0; +} + +void mte_restore_tags(swp_entry_t entry, struct page *page) +{ + void *tags = xa_load(&mte_pages, entry.val); + + if (!tags) + return; + + if (try_page_mte_tagging(page)) { + mte_restore_page_tags(page_address(page), tags); + set_page_mte_tagged(page); + } +} + +void mte_invalidate_tags(int type, pgoff_t offset) +{ + swp_entry_t entry = swp_entry(type, offset); + void *tags = xa_erase(&mte_pages, entry.val); + + mte_free_tag_storage(tags); +} + +static inline void 
__mte_invalidate_tags(struct page *page) +{ + swp_entry_t entry = page_swap_entry(page); + + mte_invalidate_tags(swp_type(entry), swp_offset(entry)); +} + +void mte_invalidate_tags_area(int type) +{ + swp_entry_t entry = swp_entry(type, 0); + swp_entry_t last_entry = swp_entry(type + 1, 0); + void *tags; + + XA_STATE(xa_state, &mte_pages, entry.val); + + xa_lock(&mte_pages); + xas_for_each(&xa_state, tags, last_entry.val - 1) { + __xa_erase(&mte_pages, xa_state.xa_index); + mte_free_tag_storage(tags); + } + xa_unlock(&mte_pages); +} + +int arch_prepare_to_swap(struct folio *folio) +{ + long i, nr; + int err; + + if (!system_supports_mte()) + return 0; + + nr = folio_nr_pages(folio); + + for (i = 0; i < nr; i++) { + err = mte_save_tags(folio_page(folio, i)); + if (err) + goto out; + } + return 0; + +out: + while (i--) + __mte_invalidate_tags(folio_page(folio, i)); + return err; +} + +void arch_swap_restore(swp_entry_t entry, struct folio *folio) +{ + long i, nr; + + if (!system_supports_mte()) + return; + + nr = folio_nr_pages(folio); + + for (i = 0; i < nr; i++) { + mte_restore_tags(entry, folio_page(folio, i)); + entry.val++; + } +} diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c deleted file mode 100644 index 4decf1659700..000000000000 --- a/arch/arm64/mm/numa.c +++ /dev/null @@ -1,470 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * NUMA support, based on the x86 implementation. - * - * Copyright (C) 2015 Cavium Inc. - * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com> - */ - -#define pr_fmt(fmt) "NUMA: " fmt - -#include <linux/acpi.h> -#include <linux/memblock.h> -#include <linux/module.h> -#include <linux/of.h> - -#include <asm/acpi.h> -#include <asm/sections.h> - -struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; -EXPORT_SYMBOL(node_data); -nodemask_t numa_nodes_parsed __initdata; -static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE }; - -static int numa_distance_cnt; -static u8 *numa_distance; -bool numa_off; - -static __init int numa_parse_early_param(char *opt) -{ - if (!opt) - return -EINVAL; - if (str_has_prefix(opt, "off")) - numa_off = true; - - return 0; -} -early_param("numa", numa_parse_early_param); - -cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; -EXPORT_SYMBOL(node_to_cpumask_map); - -#ifdef CONFIG_DEBUG_PER_CPU_MAPS - -/* - * Returns a pointer to the bitmask of CPUs on Node 'node'. - */ -const struct cpumask *cpumask_of_node(int node) -{ - if (WARN_ON(node >= nr_node_ids)) - return cpu_none_mask; - - if (WARN_ON(node_to_cpumask_map[node] == NULL)) - return cpu_online_mask; - - return node_to_cpumask_map[node]; -} -EXPORT_SYMBOL(cpumask_of_node); - -#endif - -static void numa_update_cpu(unsigned int cpu, bool remove) -{ - int nid = cpu_to_node(cpu); - - if (nid == NUMA_NO_NODE) - return; - - if (remove) - cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]); - else - cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); -} - -void numa_add_cpu(unsigned int cpu) -{ - numa_update_cpu(cpu, false); -} - -void numa_remove_cpu(unsigned int cpu) -{ - numa_update_cpu(cpu, true); -} - -void numa_clear_node(unsigned int cpu) -{ - numa_remove_cpu(cpu); - set_cpu_numa_node(cpu, NUMA_NO_NODE); -} - -/* - * Allocate node_to_cpumask_map based on number of available nodes - * Requires node_possible_map to be valid. - * - * Note: cpumask_of_node() is not valid until after this is done. - * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) 
- */ -static void __init setup_node_to_cpumask_map(void) -{ - int node; - - /* setup nr_node_ids if not done yet */ - if (nr_node_ids == MAX_NUMNODES) - setup_nr_node_ids(); - - /* allocate and clear the mapping */ - for (node = 0; node < nr_node_ids; node++) { - alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); - cpumask_clear(node_to_cpumask_map[node]); - } - - /* cpumask_of_node() will now work */ - pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); -} - -/* - * Set the cpu to node and mem mapping - */ -void numa_store_cpu_info(unsigned int cpu) -{ - set_cpu_numa_node(cpu, cpu_to_node_map[cpu]); -} - -void __init early_map_cpu_to_node(unsigned int cpu, int nid) -{ - /* fallback to node 0 */ - if (nid < 0 || nid >= MAX_NUMNODES || numa_off) - nid = 0; - - cpu_to_node_map[cpu] = nid; - - /* - * We should set the numa node of cpu0 as soon as possible, because it - * has already been set up online before. cpu_to_node(0) will soon be - * called. - */ - if (!cpu) - set_cpu_numa_node(cpu, nid); -} - -#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; -EXPORT_SYMBOL(__per_cpu_offset); - -static int __init early_cpu_to_node(int cpu) -{ - return cpu_to_node_map[cpu]; -} - -static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) -{ - return node_distance(early_cpu_to_node(from), early_cpu_to_node(to)); -} - -static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, - size_t align) -{ - int nid = early_cpu_to_node(cpu); - - return memblock_alloc_try_nid(size, align, - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid); -} - -static void __init pcpu_fc_free(void *ptr, size_t size) -{ - memblock_free_early(__pa(ptr), size); -} - -void __init setup_per_cpu_areas(void) -{ - unsigned long delta; - unsigned int cpu; - int rc; - - /* - * Always reserve area for module percpu variables. That's - * what the legacy allocator did. - */ - rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, - PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, - pcpu_cpu_distance, - pcpu_fc_alloc, pcpu_fc_free); - if (rc < 0) - panic("Failed to initialize percpu areas."); - - delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; - for_each_possible_cpu(cpu) - __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; -} -#endif - -/** - * numa_add_memblk() - Set node id to memblk - * @nid: NUMA node ID of the new memblk - * @start: Start address of the new memblk - * @end: End address of the new memblk - * - * RETURNS: - * 0 on success, -errno on failure. 
- */ -int __init numa_add_memblk(int nid, u64 start, u64 end) -{ - int ret; - - ret = memblock_set_node(start, (end - start), &memblock.memory, nid); - if (ret < 0) { - pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n", - start, (end - 1), nid); - return ret; - } - - node_set(nid, numa_nodes_parsed); - return ret; -} - -/* - * Initialize NODE_DATA for a node on the local memory - */ -static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) -{ - const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); - u64 nd_pa; - void *nd; - int tnid; - - if (start_pfn >= end_pfn) - pr_info("Initmem setup node %d [<memory-less node>]\n", nid); - - nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); - if (!nd_pa) - panic("Cannot allocate %zu bytes for node %d data\n", - nd_size, nid); - - nd = __va(nd_pa); - - /* report and initialize */ - pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n", - nd_pa, nd_pa + nd_size - 1); - tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); - if (tnid != nid) - pr_info("NODE_DATA(%d) on node %d\n", nid, tnid); - - node_data[nid] = nd; - memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); - NODE_DATA(nid)->node_id = nid; - NODE_DATA(nid)->node_start_pfn = start_pfn; - NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; -} - -/* - * numa_free_distance - * - * The current table is freed. - */ -void __init numa_free_distance(void) -{ - size_t size; - - if (!numa_distance) - return; - - size = numa_distance_cnt * numa_distance_cnt * - sizeof(numa_distance[0]); - - memblock_free(__pa(numa_distance), size); - numa_distance_cnt = 0; - numa_distance = NULL; -} - -/* - * Create a new NUMA distance table. - */ -static int __init numa_alloc_distance(void) -{ - size_t size; - u64 phys; - int i, j; - - size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]); - phys = memblock_find_in_range(0, PFN_PHYS(max_pfn), - size, PAGE_SIZE); - if (WARN_ON(!phys)) - return -ENOMEM; - - memblock_reserve(phys, size); - - numa_distance = __va(phys); - numa_distance_cnt = nr_node_ids; - - /* fill with the default distances */ - for (i = 0; i < numa_distance_cnt; i++) - for (j = 0; j < numa_distance_cnt; j++) - numa_distance[i * numa_distance_cnt + j] = i == j ? - LOCAL_DISTANCE : REMOTE_DISTANCE; - - pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt); - - return 0; -} - -/** - * numa_set_distance() - Set inter node NUMA distance from node to node. - * @from: the 'from' node to set distance - * @to: the 'to' node to set distance - * @distance: NUMA distance - * - * Set the distance from node @from to @to to @distance. - * If distance table doesn't exist, a warning is printed. - * - * If @from or @to is higher than the highest known node or lower than zero - * or @distance doesn't make sense, the call is ignored. 
- */ -void __init numa_set_distance(int from, int to, int distance) -{ - if (!numa_distance) { - pr_warn_once("Warning: distance table not allocated yet\n"); - return; - } - - if (from >= numa_distance_cnt || to >= numa_distance_cnt || - from < 0 || to < 0) { - pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - if ((u8)distance != distance || - (from == to && distance != LOCAL_DISTANCE)) { - pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - numa_distance[from * numa_distance_cnt + to] = distance; -} - -/* - * Return NUMA distance @from to @to - */ -int __node_distance(int from, int to) -{ - if (from >= numa_distance_cnt || to >= numa_distance_cnt) - return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; - return numa_distance[from * numa_distance_cnt + to]; -} -EXPORT_SYMBOL(__node_distance); - -static int __init numa_register_nodes(void) -{ - int nid; - struct memblock_region *mblk; - - /* Check that a valid nid is set for all memblks */ - for_each_memblock(memory, mblk) - if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) { - pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", - mblk->nid, mblk->base, - mblk->base + mblk->size - 1); - return -EINVAL; - } - - /* Finally register nodes. */ - for_each_node_mask(nid, numa_nodes_parsed) { - unsigned long start_pfn, end_pfn; - - get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); - setup_node_data(nid, start_pfn, end_pfn); - node_set_online(nid); - } - - /* Set up online nodes to actual nodes */ - node_possible_map = numa_nodes_parsed; - - return 0; -} - -static int __init numa_init(int (*init_func)(void)) -{ - int ret; - - nodes_clear(numa_nodes_parsed); - nodes_clear(node_possible_map); - nodes_clear(node_online_map); - - ret = numa_alloc_distance(); - if (ret < 0) - return ret; - - ret = init_func(); - if (ret < 0) - goto out_free_distance; - - if (nodes_empty(numa_nodes_parsed)) { - pr_info("No NUMA configuration found\n"); - ret = -EINVAL; - goto out_free_distance; - } - - ret = numa_register_nodes(); - if (ret < 0) - goto out_free_distance; - - setup_node_to_cpumask_map(); - - return 0; -out_free_distance: - numa_free_distance(); - return ret; -} - -/** - * dummy_numa_init() - Fallback dummy NUMA init - * - * Used if there's no underlying NUMA architecture, NUMA initialization - * fails, or NUMA is disabled on the command line. - * - * Must online at least one node (node 0) and add memory blocks that cover all - * allowed memory. It is unlikely that this function fails. - * - * Return: 0 on success, -errno on failure. - */ -static int __init dummy_numa_init(void) -{ - int ret; - struct memblock_region *mblk; - - if (numa_off) - pr_info("NUMA disabled\n"); /* Forced off on command line. */ - pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n", - memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1); - - for_each_memblock(memory, mblk) { - ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size); - if (!ret) - continue; - - pr_err("NUMA init failed\n"); - return ret; - } - - numa_off = true; - return 0; -}
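The distance bookkeeping deleted above is nothing more than a flat cnt * cnt byte matrix indexed as from * cnt + to, defaulting to the ACPI SLIT convention of 10 (local) and 20 (remote). A standalone sketch of that layout, mirroring numa_set_distance() and __node_distance() from the removed file (the four-node setup and the distance value 32 are made up for illustration):

#include <stdio.h>

#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20

enum { NODE_CNT = 4 };
static unsigned char distance[NODE_CNT * NODE_CNT];    /* flat from*cnt+to matrix */

static void set_distance(int from, int to, int d)
{
        if (from < 0 || to < 0 || from >= NODE_CNT || to >= NODE_CNT)
                return;         /* out-of-range updates are ignored */
        distance[from * NODE_CNT + to] = d;
}

static int node_distance(int from, int to)
{
        if (from >= NODE_CNT || to >= NODE_CNT)
                return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
        return distance[from * NODE_CNT + to];
}

int main(void)
{
        /* Default fill, exactly as numa_alloc_distance() did. */
        for (int i = 0; i < NODE_CNT; i++)
                for (int j = 0; j < NODE_CNT; j++)
                        distance[i * NODE_CNT + j] =
                                (i == j) ? LOCAL_DISTANCE : REMOTE_DISTANCE;

        set_distance(0, 2, 32);         /* e.g. a slower cross-socket link */
        printf("%d %d %d\n", node_distance(0, 0),
               node_distance(0, 1), node_distance(0, 2));      /* 10 20 32 */
        return 0;
}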
- */ -void __init arm64_numa_init(void) -{ - if (!numa_off) { - if (!acpi_disabled && !numa_init(arm64_acpi_numa_init)) - return; - if (acpi_disabled && !numa_init(of_numa_init)) - return; - } - - numa_init(dummy_numa_init); -} - -/* - * We hope that we will be hotplugging memory on nodes we already know about, - * such that acpi_get_node() succeeds and we never fall back to this... - */ -int memory_add_physaddr_to_nid(u64 addr) -{ - pr_warn("Unknown node for memory at 0x%llx, assuming node 0\n", addr); - return 0; -} diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 9ce7bd9d4d9c..04d4a8f676db 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -5,12 +5,15 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> +#include <linux/mem_encrypt.h> #include <linux/sched.h> #include <linux/vmalloc.h> -#include <asm/pgtable.h> +#include <asm/cacheflush.h> +#include <asm/pgtable-prot.h> #include <asm/set_memory.h> #include <asm/tlbflush.h> +#include <asm/kfence.h> struct page_change_data { pgprot_t set_mask; @@ -19,15 +22,30 @@ struct page_change_data { bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED); +bool can_set_direct_map(void) +{ + /* + * rodata_full, DEBUG_PAGEALLOC and a Realm guest all require linear + * map to be mapped at page granularity, so that it is possible to + * protect/unprotect single pages. + * + * KFENCE pool requires page-granular mapping if initialized late. + * + * Realms need to make pages shared/protected at page granularity. + */ + return rodata_full || debug_pagealloc_enabled() || + arm64_kfence_can_set_direct_map() || is_realm_world(); +} + static int change_page_range(pte_t *ptep, unsigned long addr, void *data) { struct page_change_data *cdata = data; - pte_t pte = READ_ONCE(*ptep); + pte_t pte = __ptep_get(ptep); pte = clear_pte_bit(pte, cdata->clear_mask); pte = set_pte_bit(pte, cdata->set_mask); - set_pte(ptep, pte); + __set_pte(ptep, pte); return 0; } @@ -46,7 +64,13 @@ static int __change_memory_common(unsigned long start, unsigned long size, ret = apply_to_page_range(&init_mm, start, size, change_page_range, &data); - flush_tlb_kernel_range(start, start + size); + /* + * If the memory is being made valid without changing any other bits + * then a TLBI isn't required as a non-valid entry cannot be cached in + * the TLB. + */ + if (pgprot_val(set_mask) != PTE_VALID || pgprot_val(clear_mask)) + flush_tlb_kernel_range(start, start + size); return ret; } @@ -54,7 +78,7 @@ static int change_memory_common(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask) { unsigned long start = addr; - unsigned long size = PAGE_SIZE*numpages; + unsigned long size = PAGE_SIZE * numpages; unsigned long end = start + size; struct vm_struct *area; int i; @@ -72,16 +96,16 @@ static int change_memory_common(unsigned long addr, int numpages, * we are operating on does not result in such splitting. * * Let's restrict ourselves to mappings created by vmalloc (or vmap). - * Those are guaranteed to consist entirely of page mappings, and - * splitting is never needed. + * Disallow VM_ALLOW_HUGE_VMAP mappings to guarantee that only page + * mappings are updated and splitting is never needed. * * So check whether the [addr, addr + size) interval is entirely * covered by precisely one VM area that has the VM_ALLOC flag set. 
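Stated as a standalone predicate, the test that follows checks both containment and the flag pattern; a sketch assuming the kernel's VM_ALLOC and VM_ALLOW_HUGE_VMAP flag values:

    /* Sketch: [addr, addr + size) must lie inside one area that was
     * created by vmalloc()/vmap() (VM_ALLOC) and is guaranteed to be
     * mapped with base pages (no VM_ALLOW_HUGE_VMAP). */
    static bool range_is_page_mapped_vmalloc(unsigned long addr,
                                             unsigned long size,
                                             unsigned long area_start,
                                             unsigned long area_size,
                                             unsigned long area_flags)
    {
            if (addr < area_start || addr + size > area_start + area_size)
                    return false;
            return (area_flags & (VM_ALLOC | VM_ALLOW_HUGE_VMAP)) == VM_ALLOC;
    }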
 */
	area = find_vm_area((void *)addr);
	if (!area ||
-	    end > (unsigned long)area->addr + area->size ||
-	    !(area->flags & VM_ALLOC))
+	    end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
+	    ((area->flags & (VM_ALLOC | VM_ALLOW_HUGE_VMAP)) != VM_ALLOC))
 		return -EINVAL;
 
 	if (!numpages)
@@ -126,13 +150,13 @@ int set_memory_nx(unsigned long addr, int numpages)
 {
 	return change_memory_common(addr, numpages,
 					__pgprot(PTE_PXN),
-					__pgprot(0));
+					__pgprot(PTE_MAYBE_GP));
 }
 
 int set_memory_x(unsigned long addr, int numpages)
 {
 	return change_memory_common(addr, numpages,
-					__pgprot(0),
+					__pgprot(PTE_MAYBE_GP),
 					__pgprot(PTE_PXN));
 }
 
@@ -155,7 +179,7 @@ int set_direct_map_invalid_noflush(struct page *page)
 		.clear_mask = __pgprot(PTE_VALID),
 	};
 
-	if (!rodata_full)
+	if (!can_set_direct_map())
 		return 0;
 
 	return apply_to_page_range(&init_mm,
@@ -170,7 +194,7 @@ int set_direct_map_default_noflush(struct page *page)
 		.clear_mask = __pgprot(PTE_RDONLY),
 	};
 
-	if (!rodata_full)
+	if (!can_set_direct_map())
 		return 0;
 
 	return apply_to_page_range(&init_mm,
@@ -178,18 +202,115 @@ int set_direct_map_default_noflush(struct page *page)
 	PAGE_SIZE, change_page_range, &data);
 }
 
+static int __set_memory_enc_dec(unsigned long addr,
+				int numpages,
+				bool encrypt)
+{
+	unsigned long set_prot = 0, clear_prot = 0;
+	phys_addr_t start, end;
+	int ret;
+
+	if (!is_realm_world())
+		return 0;
+
+	if (!__is_lm_address(addr))
+		return -EINVAL;
+
+	start = __virt_to_phys(addr);
+	end = start + numpages * PAGE_SIZE;
+
+	if (encrypt)
+		clear_prot = PROT_NS_SHARED;
+	else
+		set_prot = PROT_NS_SHARED;
+
+	/*
+	 * Break the mapping before we make any changes to avoid stale TLB
+	 * entries or Synchronous External Aborts caused by RIPAS_EMPTY.
+	 */
+	ret = __change_memory_common(addr, PAGE_SIZE * numpages,
+				     __pgprot(set_prot),
+				     __pgprot(clear_prot | PTE_VALID));
+
+	if (ret)
+		return ret;
+
+	if (encrypt)
+		ret = rsi_set_memory_range_protected(start, end);
+	else
+		ret = rsi_set_memory_range_shared(start, end);
+
+	if (ret)
+		return ret;
+
+	return __change_memory_common(addr, PAGE_SIZE * numpages,
+				      __pgprot(PTE_VALID),
+				      __pgprot(0));
+}
+
+static int realm_set_memory_encrypted(unsigned long addr, int numpages)
+{
+	int ret = __set_memory_enc_dec(addr, numpages, true);
+
+	/*
+	 * If the request to change state fails, then the only sensible course
+	 * of action for the caller is to leak the memory
+	 */
+	WARN(ret, "Failed to encrypt memory, %d pages will be leaked",
+	     numpages);
+
+	return ret;
+}
+
+static int realm_set_memory_decrypted(unsigned long addr, int numpages)
+{
+	int ret = __set_memory_enc_dec(addr, numpages, false);
+
+	WARN(ret, "Failed to decrypt memory, %d pages will be leaked",
+	     numpages);
+
+	return ret;
+}
+
+static const struct arm64_mem_crypt_ops realm_crypt_ops = {
+	.encrypt = realm_set_memory_encrypted,
+	.decrypt = realm_set_memory_decrypted,
+};
+
+int realm_register_memory_enc_ops(void)
+{
+	return arm64_mem_crypt_ops_register(&realm_crypt_ops);
+}
+
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	if (!can_set_direct_map())
+		return 0;
+
+	return set_memory_valid(addr, nr, valid);
+}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/*
+ * This is - apart from the return value - doing the same
+ * thing as the new set_direct_map_valid_noflush() function.
+ *
+ * Unify? Explain the conceptual differences?
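Both this helper and set_direct_map_valid_noflush() come down to toggling PTE_VALID on linear-map entries; a hypothetical caller-side sketch of the debug-allocator usage (not kernel code):

    /* Hypothetical: guard a page while it sits on a free list. An
     * invalid linear-map entry turns stray accesses into faults; the
     * entry is restored before the page is handed out again. */
    static void debug_guard_page(struct page *page, bool on_free_list)
    {
            unsigned long addr = (unsigned long)page_address(page);

            set_memory_valid(addr, 1, !on_free_list);
    }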
+ */ void __kernel_map_pages(struct page *page, int numpages, int enable) { - if (!debug_pagealloc_enabled() && !rodata_full) + if (!can_set_direct_map()) return; set_memory_valid((unsigned long)page_address(page), numpages, enable); } +#endif /* CONFIG_DEBUG_PAGEALLOC */ /* * This function is used to determine if a linear map page has been marked as - * not-valid. Walk the page table and check the PTE_VALID bit. This is based - * on kern_addr_valid(), which almost does what we need. + * not-valid. Walk the page table and check the PTE_VALID bit. * * Because this is only called on the kernel linear map, p?d_sect() implies * p?d_present(). When debug_pagealloc is enabled, sections mappings are @@ -198,19 +319,21 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) bool kernel_page_present(struct page *page) { pgd_t *pgdp; + p4d_t *p4dp; pud_t *pudp, pud; pmd_t *pmdp, pmd; pte_t *ptep; unsigned long addr = (unsigned long)page_address(page); - if (!debug_pagealloc_enabled() && !rodata_full) - return true; - pgdp = pgd_offset_k(addr); if (pgd_none(READ_ONCE(*pgdp))) return false; - pudp = pud_offset(pgdp, addr); + p4dp = p4d_offset(pgdp, addr); + if (p4d_none(READ_ONCE(*p4dp))) + return false; + + pudp = pud_offset(p4dp, addr); pud = READ_ONCE(*pudp); if (pud_none(pud)) return false; @@ -225,5 +348,5 @@ bool kernel_page_present(struct page *page) return true; ptep = pte_offset_kernel(pmdp, addr); - return pte_valid(READ_ONCE(*ptep)); + return pte_valid(__ptep_get(ptep)); } diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c index 4a64089e5771..8160cff35089 100644 --- a/arch/arm64/mm/pgd.c +++ b/arch/arm64/mm/pgd.c @@ -17,27 +17,38 @@ static struct kmem_cache *pgd_cache __ro_after_init; +static bool pgdir_is_page_size(void) +{ + if (PGD_SIZE == PAGE_SIZE) + return true; + if (CONFIG_PGTABLE_LEVELS == 4) + return !pgtable_l4_enabled(); + if (CONFIG_PGTABLE_LEVELS == 5) + return !pgtable_l5_enabled(); + return false; +} + pgd_t *pgd_alloc(struct mm_struct *mm) { gfp_t gfp = GFP_PGTABLE_USER; - if (PGD_SIZE == PAGE_SIZE) - return (pgd_t *)__get_free_page(gfp); + if (pgdir_is_page_size()) + return __pgd_alloc(mm, 0); else return kmem_cache_alloc(pgd_cache, gfp); } void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - if (PGD_SIZE == PAGE_SIZE) - free_page((unsigned long)pgd); + if (pgdir_is_page_size()) + __pgd_free(mm, pgd); else kmem_cache_free(pgd_cache, pgd); } void __init pgtable_cache_init(void) { - if (PGD_SIZE == PAGE_SIZE) + if (pgdir_is_page_size()) return; #ifdef CONFIG_ARM64_PA_BITS_52 diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c index 67a9ba9eaa96..7d94e09b01b3 100644 --- a/arch/arm64/mm/physaddr.c +++ b/arch/arm64/mm/physaddr.c @@ -9,8 +9,8 @@ phys_addr_t __virt_to_phys(unsigned long x) { - WARN(!__is_lm_address(x), - "virt_to_phys used for non-linear address: %pK (%pS)\n", + WARN(!__is_lm_address(__tag_reset(x)), + "virt_to_phys used for non-linear address: %p (%pS)\n", (void *)x, (void *)x); diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index a1e0592d1fbc..80d470aa469d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -9,13 +9,18 @@ #include <linux/init.h> #include <linux/linkage.h> +#include <linux/pgtable.h> +#include <linux/cfi_types.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> +#include <asm/asm_pointer_auth.h> #include <asm/hwcap.h> -#include <asm/pgtable.h> +#include <asm/kernel-pgtable.h> #include <asm/pgtable-hwdef.h> #include <asm/cpufeature.h> #include <asm/alternative.h> +#include <asm/smp.h> 
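The __virt_to_phys() warning above fires when the helper is fed anything outside the linear map, where the fixed virt-to-phys offset does not hold; an illustrative sketch of the check (simplified, the kernel's __is_lm_address() is the authoritative form):

    /* Simplified: a linear-map address sits in [PAGE_OFFSET, PAGE_END)
     * and converts with a constant offset; vmalloc, module and fixmap
     * addresses do not. */
    static inline bool is_lm_address_sketch(unsigned long addr)
    {
            return addr >= PAGE_OFFSET && addr < PAGE_END;
    }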
+#include <asm/sysreg.h> #ifdef CONFIG_ARM64_64K_PAGES #define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K @@ -31,26 +36,47 @@ #define TCR_KASLR_FLAGS 0 #endif -#define TCR_SMP_FLAGS TCR_SHARED - /* PTWs cacheable, inner/outer WBWA */ #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA #ifdef CONFIG_KASAN_SW_TAGS -#define TCR_KASAN_FLAGS TCR_TBI1 +#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1 +#else +#define TCR_KASAN_SW_FLAGS 0 +#endif + +#ifdef CONFIG_KASAN_HW_TAGS +#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1 +#elif defined(CONFIG_ARM64_MTE) +/* + * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on + * TBI being enabled at EL1. + */ +#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1 #else -#define TCR_KASAN_FLAGS 0 +#define TCR_MTE_FLAGS 0 #endif -#define MAIR(attr, mt) ((attr) << ((mt) * 8)) +/* + * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and + * changed during mte_cpu_setup to Normal Tagged if the system supports MTE. + */ +#define MAIR_EL1_SET \ + (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \ + MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED)) #ifdef CONFIG_CPU_PM /** * cpu_do_suspend - save CPU registers context * * x0: virtual address of context pointer + * + * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>. */ -ENTRY(cpu_do_suspend) +SYM_FUNC_START(cpu_do_suspend) mrs x2, tpidr_el0 mrs x3, tpidrro_el0 mrs x4, contextidr_el1 @@ -61,11 +87,7 @@ ENTRY(cpu_do_suspend) mrs x9, mdscr_el1 mrs x10, oslsr_el1 mrs x11, sctlr_el1 -alternative_if_not ARM64_HAS_VIRT_HOST_EXTN - mrs x12, tpidr_el1 -alternative_else - mrs x12, tpidr_el2 -alternative_endif + get_this_cpu_offset x12 mrs x13, sp_el0 stp x2, x3, [x0] stp x4, x5, [x0, #16] @@ -73,22 +95,33 @@ alternative_endif stp x8, x9, [x0, #48] stp x10, x11, [x0, #64] stp x12, x13, [x0, #80] + /* + * Save x18 as it may be used as a platform register, e.g. by shadow + * call stack. + */ + str x18, [x0, #96] ret -ENDPROC(cpu_do_suspend) +SYM_FUNC_END(cpu_do_suspend) /** * cpu_do_resume - restore CPU register context * * x0: Address of context pointer */ - .pushsection ".idmap.text", "awx" -ENTRY(cpu_do_resume) +SYM_FUNC_START(cpu_do_resume) ldp x2, x3, [x0] ldp x4, x5, [x0, #16] ldp x6, x8, [x0, #32] ldp x9, x10, [x0, #48] ldp x11, x12, [x0, #64] ldp x13, x14, [x0, #80] + /* + * Restore x18, as it may be used as a platform register, and clear + * the buffer to minimize the risk of exposure when used for shadow + * call stack. + */ + ldr x18, [x0, #96] + str xzr, [x0, #96] msr tpidr_el0, x2 msr tpidrro_el0, x3 msr contextidr_el1, x4 @@ -100,22 +133,10 @@ ENTRY(cpu_do_resume) msr tcr_el1, x8 msr vbar_el1, x9 - - /* - * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking - * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug - * exception. Mask them until local_daif_restore() in cpu_suspend() - * resets them. 
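The stp offsets in cpu_do_suspend() (#0 through #96) define the save area that cpu_do_resume() reads back; viewed from C it is thirteen 64-bit slots, as a sketch (the authoritative definition is struct cpu_suspend_ctx in <asm/suspend.h>):

    /* Sketch of the layout implied by the stp/ldp offsets: six register
     * pairs at #0..#88, then x18 at #96. Field name invented here; the
     * real struct cpu_suspend_ctx must stay in sync with the assembly. */
    struct suspend_ctx_sketch {
            unsigned long ctx_regs[13];     /* tpidr_el0 ... sp_el0, x18 */
    };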
- */ - disable_daif msr mdscr_el1, x10 msr sctlr_el1, x12 -alternative_if_not ARM64_HAS_VIRT_HOST_EXTN - msr tpidr_el1, x13 -alternative_else - msr tpidr_el2, x13 -alternative_endif + set_this_cpu_offset x13 msr sp_el0, x14 /* * Restore oslsr_el1 by writing oslar_el1 @@ -124,49 +145,22 @@ alternative_endif ubfx x11, x11, #1, #1 msr oslar_el1, x11 reset_pmuserenr_el0 x0 // Disable PMU access from EL0 + reset_amuserenr_el0 x0 // Disable AMU access from EL0 alternative_if ARM64_HAS_RAS_EXTN msr_s SYS_DISR_EL1, xzr alternative_else_nop_endif + ptrauth_keys_install_kernel_nosync x14, x1, x2, x3 isb ret -ENDPROC(cpu_do_resume) - .popsection +SYM_FUNC_END(cpu_do_resume) #endif -/* - * cpu_do_switch_mm(pgd_phys, tsk) - * - * Set the translation table base pointer to be pgd_phys. - * - * - pgd_phys - physical address of new TTB - */ -ENTRY(cpu_do_switch_mm) - mrs x2, ttbr1_el1 - mmid x1, x1 // get mm->context.id - phys_to_ttbr x3, x0 - -alternative_if ARM64_HAS_CNP - cbz x1, 1f // skip CNP for reserved ASID - orr x3, x3, #TTBR_CNP_BIT -1: -alternative_else_nop_endif -#ifdef CONFIG_ARM64_SW_TTBR0_PAN - bfi x3, x1, #48, #16 // set the ASID field in TTBR0 -#endif - bfi x2, x1, #48, #16 // set the ASID - msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) - isb - msr ttbr0_el1, x3 // now update TTBR0 - isb - b post_ttbr_update_workaround // Back to C code... -ENDPROC(cpu_do_switch_mm) - - .pushsection ".idmap.text", "awx" + .pushsection ".idmap.text", "a" .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 - adrp \tmp1, empty_zero_page + adrp \tmp1, reserved_pg_dir phys_to_ttbr \tmp2, \tmp1 offset_ttbr1 \tmp2, \tmp1 msr ttbr1_el1, \tmp2 @@ -182,50 +176,85 @@ ENDPROC(cpu_do_switch_mm) * This is the low-level counterpart to cpu_replace_ttbr1, and should not be * called by anything else. It can only be executed from a TTBR0 mapping. */ -ENTRY(idmap_cpu_replace_ttbr1) - save_and_disable_daif flags=x2 - +SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1) __idmap_cpu_set_reserved_ttbr1 x1, x3 offset_ttbr1 x0, x3 msr ttbr1_el1, x0 isb - restore_daif x2 - ret -ENDPROC(idmap_cpu_replace_ttbr1) +SYM_FUNC_END(idmap_cpu_replace_ttbr1) +SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1) .popsection #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 - .pushsection ".idmap.text", "awx" - - .macro __idmap_kpti_get_pgtable_ent, type - dc cvac, cur_\()\type\()p // Ensure any existing dirty - dmb sy // lines are written back before - ldr \type, [cur_\()\type\()p] // loading the entry - tbz \type, #0, skip_\()\type // Skip invalid and - tbnz \type, #11, skip_\()\type // non-global entries + +#define KPTI_NG_PTE_FLAGS (PTE_ATTRINDX(MT_NORMAL) | PTE_TYPE_PAGE | \ + PTE_AF | PTE_SHARED | PTE_UXN | PTE_WRITE) + + .pushsection ".idmap.text", "a" + + .macro pte_to_phys, phys, pte + and \phys, \pte, #PTE_ADDR_LOW +#ifdef CONFIG_ARM64_PA_BITS_52 + and \pte, \pte, #PTE_ADDR_HIGH + orr \phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT +#endif .endm - .macro __idmap_kpti_put_pgtable_ent_ng, type + .macro kpti_mk_tbl_ng, type, num_entries + add end_\type\()p, cur_\type\()p, #\num_entries * 8 +.Ldo_\type: + ldr \type, [cur_\type\()p], #8 // Load the entry and advance + tbz \type, #0, .Lnext_\type // Skip invalid and + tbnz \type, #11, .Lnext_\type // non-global entries orr \type, \type, #PTE_NG // Same bit for blocks and pages - str \type, [cur_\()\type\()p] // Update the entry and ensure - dmb sy // that it is visible to all - dc civac, cur_\()\type\()p // CPUs. 
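In C terms, the rewrite that kpti_mk_tbl_ng performs per level is a simple read-modify-write over each table; an illustrative sketch (bit positions per the Armv8 descriptor format):

    #define DESC_VALID      (1UL << 0)
    #define DESC_NG         (1UL << 11)     /* same bit for blocks and pages */

    /* Sketch of kpti_mk_tbl_ng: make every live, global entry non-global. */
    static void mark_table_ng(unsigned long *table, int nr_entries)
    {
            for (int i = 0; i < nr_entries; i++) {
                    unsigned long e = table[i];

                    if (!(e & DESC_VALID) || (e & DESC_NG))
                            continue;       /* skip invalid and non-global */
                    table[i] = e | DESC_NG;
            }
    }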
+ str \type, [cur_\type\()p, #-8] // Update the entry + .ifnc \type, pte + tbnz \type, #1, .Lderef_\type + .endif +.Lnext_\type: + cmp cur_\type\()p, end_\type\()p + b.ne .Ldo_\type + .endm + + /* + * Dereference the current table entry and map it into the temporary + * fixmap slot associated with the current level. + */ + .macro kpti_map_pgtbl, type, level + str xzr, [temp_pte, #8 * (\level + 2)] // break before make + dsb nshst + add pte, temp_pte, #PAGE_SIZE * (\level + 2) + lsr pte, pte, #12 + tlbi vaae1, pte + dsb nsh + isb + + phys_to_pte pte, cur_\type\()p + add cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2) + orr pte, pte, pte_flags + str pte, [temp_pte, #8 * (\level + 2)] + dsb nshst .endm /* - * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper) + * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd, + * unsigned long temp_pte_va) * * Called exactly once from stop_machine context by each CPU found during boot. */ -__idmap_kpti_flag: - .long 1 -ENTRY(idmap_kpti_install_ng_mappings) + .pushsection ".data", "aw", %progbits +SYM_DATA(__idmap_kpti_flag, .long 1) + .popsection + +SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings) cpu .req w0 + temp_pte .req x0 num_cpus .req w1 - swapper_pa .req x2 + pte_flags .req x1 + temp_pgd_phys .req x2 swapper_ttb .req x3 flag_ptr .req x4 cur_pgdp .req x5 @@ -233,234 +262,232 @@ ENTRY(idmap_kpti_install_ng_mappings) pgd .req x7 cur_pudp .req x8 end_pudp .req x9 - pud .req x10 cur_pmdp .req x11 end_pmdp .req x12 - pmd .req x13 cur_ptep .req x14 end_ptep .req x15 pte .req x16 + valid .req x17 + cur_p4dp .req x19 + end_p4dp .req x20 + mov x5, x3 // preserve temp_pte arg mrs swapper_ttb, ttbr1_el1 - restore_ttbr1 swapper_ttb - adr flag_ptr, __idmap_kpti_flag + adr_l flag_ptr, __idmap_kpti_flag cbnz cpu, __idmap_kpti_secondary +#if CONFIG_PGTABLE_LEVELS > 4 + stp x29, x30, [sp, #-32]! + mov x29, sp + stp x19, x20, [sp, #16] +#endif + /* We're the boot CPU. Wait for the others to catch up */ sevl 1: wfe - ldaxr w18, [flag_ptr] - eor w18, w18, num_cpus - cbnz w18, 1b - - /* We need to walk swapper, so turn off the MMU. */ - pre_disable_mmu_workaround - mrs x18, sctlr_el1 - bic x18, x18, #SCTLR_ELx_M - msr sctlr_el1, x18 - isb + ldaxr w17, [flag_ptr] + eor w17, w17, num_cpus + cbnz w17, 1b - /* Everybody is enjoying the idmap, so we can rewrite swapper. */ - /* PGD */ - mov cur_pgdp, swapper_pa - add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) -do_pgd: __idmap_kpti_get_pgtable_ent pgd - tbnz pgd, #1, walk_puds -next_pgd: - __idmap_kpti_put_pgtable_ent_ng pgd -skip_pgd: - add cur_pgdp, cur_pgdp, #8 - cmp cur_pgdp, end_pgdp - b.ne do_pgd - - /* Publish the updated tables and nuke all the TLBs */ - dsb sy - tlbi vmalle1is - dsb ish + /* Switch to the temporary page tables on this CPU only */ + __idmap_cpu_set_reserved_ttbr1 x8, x9 + offset_ttbr1 temp_pgd_phys, x8 + msr ttbr1_el1, temp_pgd_phys isb - /* We're done: fire up the MMU again */ - mrs x18, sctlr_el1 - orr x18, x18, #SCTLR_ELx_M - msr sctlr_el1, x18 - isb + mov temp_pte, x5 + mov_q pte_flags, KPTI_NG_PTE_FLAGS + + /* Everybody is enjoying the idmap, so we can rewrite swapper. */ +#ifdef CONFIG_ARM64_LPA2 /* - * Invalidate the local I-cache so that any instructions fetched - * speculatively from the PoC are discarded, since they may have - * been dynamically patched at the PoU. 
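The flag handshake above works like a small barrier: __idmap_kpti_flag starts at 1 (counting the boot CPU), each secondary atomically increments it, the boot CPU spins until it equals the CPU count, rewrites swapper, and finally stores zero to release everyone. The same protocol in C (a sketch with C11 atomics standing in for the LL/SC sequences):

    #include <stdatomic.h>

    static atomic_int kpti_flag = 1;        /* the boot CPU counts as 1 */

    static void secondary_side(void)
    {
            atomic_fetch_add(&kpti_flag, 1);        /* parked in the idmap */
            while (atomic_load(&kpti_flag) != 0)
                    ;                               /* wait for release */
    }

    static void boot_side(int num_cpus)
    {
            while (atomic_load(&kpti_flag) != num_cpus)
                    ;                       /* wait for all secondaries */
            /* ... rewrite swapper with nG set, as above ... */
            atomic_store(&kpti_flag, 0);    /* release the secondaries */
    }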
+ * If LPA2 support is configured, but 52-bit virtual addressing is not + * enabled at runtime, we will fall back to one level of paging less, + * and so we have to walk swapper_pg_dir as if we dereferenced its + * address from a PGD level entry, and terminate the PGD level loop + * right after. */ - ic iallu - dsb nsh + adrp pgd, swapper_pg_dir // walk &swapper_pg_dir at the next level + mov cur_pgdp, end_pgdp // must be equal to terminate the PGD loop +alternative_if_not ARM64_HAS_VA52 + b .Lderef_pgd // skip to the next level +alternative_else_nop_endif + /* + * LPA2 based 52-bit virtual addressing requires 52-bit physical + * addressing to be enabled as well. In this case, the shareability + * bits are repurposed as physical address bits, and should not be + * set in pte_flags. + */ + bic pte_flags, pte_flags, #PTE_SHARED +#endif + + /* PGD */ + adrp cur_pgdp, swapper_pg_dir + kpti_map_pgtbl pgd, -1 + kpti_mk_tbl_ng pgd, PTRS_PER_PGD + + /* Ensure all the updated entries are visible to secondary CPUs */ + dsb ishst + + /* We're done: fire up swapper_pg_dir again */ + __idmap_cpu_set_reserved_ttbr1 x8, x9 + msr ttbr1_el1, swapper_ttb isb /* Set the flag to zero to indicate that we're all done */ str wzr, [flag_ptr] +#if CONFIG_PGTABLE_LEVELS > 4 + ldp x19, x20, [sp, #16] + ldp x29, x30, [sp], #32 +#endif ret +.Lderef_pgd: + /* P4D */ + .if CONFIG_PGTABLE_LEVELS > 4 + p4d .req x30 + pte_to_phys cur_p4dp, pgd + kpti_map_pgtbl p4d, 0 + kpti_mk_tbl_ng p4d, PTRS_PER_P4D + b .Lnext_pgd + .else /* CONFIG_PGTABLE_LEVELS <= 4 */ + p4d .req pgd + .set .Lnext_p4d, .Lnext_pgd + .endif + +.Lderef_p4d: /* PUD */ -walk_puds: - .if CONFIG_PGTABLE_LEVELS > 3 - pte_to_phys cur_pudp, pgd - add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) -do_pud: __idmap_kpti_get_pgtable_ent pud - tbnz pud, #1, walk_pmds -next_pud: - __idmap_kpti_put_pgtable_ent_ng pud -skip_pud: - add cur_pudp, cur_pudp, 8 - cmp cur_pudp, end_pudp - b.ne do_pud - b next_pgd - .else /* CONFIG_PGTABLE_LEVELS <= 3 */ - mov pud, pgd - b walk_pmds -next_pud: - b next_pgd + .if CONFIG_PGTABLE_LEVELS > 3 + pud .req x10 + pte_to_phys cur_pudp, p4d + kpti_map_pgtbl pud, 1 + kpti_mk_tbl_ng pud, PTRS_PER_PUD + b .Lnext_p4d + .else /* CONFIG_PGTABLE_LEVELS <= 3 */ + pud .req pgd + .set .Lnext_pud, .Lnext_pgd .endif +.Lderef_pud: /* PMD */ -walk_pmds: - .if CONFIG_PGTABLE_LEVELS > 2 + .if CONFIG_PGTABLE_LEVELS > 2 + pmd .req x13 pte_to_phys cur_pmdp, pud - add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) -do_pmd: __idmap_kpti_get_pgtable_ent pmd - tbnz pmd, #1, walk_ptes -next_pmd: - __idmap_kpti_put_pgtable_ent_ng pmd -skip_pmd: - add cur_pmdp, cur_pmdp, #8 - cmp cur_pmdp, end_pmdp - b.ne do_pmd - b next_pud - .else /* CONFIG_PGTABLE_LEVELS <= 2 */ - mov pmd, pud - b walk_ptes -next_pmd: - b next_pud + kpti_map_pgtbl pmd, 2 + kpti_mk_tbl_ng pmd, PTRS_PER_PMD + b .Lnext_pud + .else /* CONFIG_PGTABLE_LEVELS <= 2 */ + pmd .req pgd + .set .Lnext_pmd, .Lnext_pgd .endif +.Lderef_pmd: /* PTE */ -walk_ptes: pte_to_phys cur_ptep, pmd - add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) -do_pte: __idmap_kpti_get_pgtable_ent pte - __idmap_kpti_put_pgtable_ent_ng pte -skip_pte: - add cur_ptep, cur_ptep, #8 - cmp cur_ptep, end_ptep - b.ne do_pte - b next_pmd + kpti_map_pgtbl pte, 3 + kpti_mk_tbl_ng pte, PTRS_PER_PTE + b .Lnext_pmd + + .unreq cpu + .unreq temp_pte + .unreq num_cpus + .unreq pte_flags + .unreq temp_pgd_phys + .unreq cur_pgdp + .unreq end_pgdp + .unreq pgd + .unreq cur_pudp + .unreq end_pudp + .unreq pud + .unreq cur_pmdp + .unreq end_pmdp + .unreq pmd + .unreq 
cur_ptep
+	.unreq	end_ptep
+	.unreq	pte
+	.unreq	valid
+	.unreq	cur_p4dp
+	.unreq	end_p4dp
+	.unreq	p4d
 
 	/* Secondary CPUs end up here */
 __idmap_kpti_secondary:
 	/* Uninstall swapper before surgery begins */
-	__idmap_cpu_set_reserved_ttbr1 x18, x17
+	__idmap_cpu_set_reserved_ttbr1 x16, x17
 
 	/* Increment the flag to let the boot CPU know we're ready */
-1:	ldxr	w18, [flag_ptr]
-	add	w18, w18, #1
-	stxr	w17, w18, [flag_ptr]
+1:	ldxr	w16, [flag_ptr]
+	add	w16, w16, #1
+	stxr	w17, w16, [flag_ptr]
 	cbnz	w17, 1b
 
 	/* Wait for the boot CPU to finish messing around with swapper */
 	sevl
 1:	wfe
-	ldxr	w18, [flag_ptr]
-	cbnz	w18, 1b
+	ldxr	w16, [flag_ptr]
+	cbnz	w16, 1b
 
 	/* All done, act like nothing happened */
-	offset_ttbr1 swapper_ttb, x18
 	msr	ttbr1_el1, swapper_ttb
 	isb
 	ret
 
-	.unreq	cpu
-	.unreq	num_cpus
-	.unreq	swapper_pa
 	.unreq	swapper_ttb
 	.unreq	flag_ptr
-	.unreq	cur_pgdp
-	.unreq	end_pgdp
-	.unreq	pgd
-	.unreq	cur_pudp
-	.unreq	end_pudp
-	.unreq	pud
-	.unreq	cur_pmdp
-	.unreq	end_pmdp
-	.unreq	pmd
-	.unreq	cur_ptep
-	.unreq	end_ptep
-	.unreq	pte
-ENDPROC(idmap_kpti_install_ng_mappings)
+SYM_FUNC_END(idmap_kpti_install_ng_mappings)
 	.popsection
 #endif
 
 /*
  * __cpu_setup
  *
- * Initialise the processor for turning the MMU on. Return in x0 the
- * value of the SCTLR_EL1 register.
+ * Initialise the processor for turning the MMU on.
+ *
+ * Output:
+ *	Return in x0 the value of the SCTLR_EL1 register.
  */
-	.pushsection ".idmap.text", "awx"
-ENTRY(__cpu_setup)
+	.pushsection ".idmap.text", "a"
+SYM_FUNC_START(__cpu_setup)
 	tlbi	vmalle1				// Invalidate local TLB
 	dsb	nsh
 
-	mov	x0, #3 << 20
-	msr	cpacr_el1, x0			// Enable FP/ASIMD
-	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
-	msr	mdscr_el1, x0			// access to the DCC from EL0
-	isb					// Unmask debug exceptions now,
-	enable_dbg				// since this is per-cpu
-	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
-	/*
-	 * Memory region attributes for LPAE:
-	 *
-	 *   n = AttrIndx[2:0]
-	 *			n	MAIR
-	 *   DEVICE_nGnRnE	000	00000000
-	 *   DEVICE_nGnRE	001	00000100
-	 *   DEVICE_GRE		010	00001100
-	 *   NORMAL_NC		011	01000100
-	 *   NORMAL		100	11111111
-	 *   NORMAL_WT		101	10111011
-	 */
-	ldr	x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
-		     MAIR(0x04, MT_DEVICE_nGnRE) | \
-		     MAIR(0x0c, MT_DEVICE_GRE) | \
-		     MAIR(0x44, MT_NORMAL_NC) | \
-		     MAIR(0xff, MT_NORMAL) | \
-		     MAIR(0xbb, MT_NORMAL_WT)
-	msr	mair_el1, x5
-	/*
-	 * Prepare SCTLR
-	 */
-	mov_q	x0, SCTLR_EL1_SET
+	msr	cpacr_el1, xzr			// Reset cpacr_el1
+	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
+	msr	mdscr_el1, x1			// access to the DCC from EL0
+	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
+	reset_amuserenr_el0 x1			// Disable AMU access from EL0
+
 	/*
-	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
-	 * both user and kernel.
+	 * Default values for VMSA control registers. These will be adjusted
+	 * below depending on detected CPU features.
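MAIR_EL1, set a few lines below, packs eight 8-bit attribute encodings; each descriptor's AttrIndx[2:0] selects one byte. A sketch of the packing behind MAIR_ATTRIDX (index values are illustrative, the real ones come from the arch headers):

    #include <stdint.h>

    /* Field n of MAIR occupies bits [8n+7:8n]; MAIR_ATTRIDX(attr, n)
     * reduces to this shift. */
    static inline uint64_t mair_attridx(uint64_t attr, unsigned int idx)
    {
            return attr << (idx * 8);
    }

    /* e.g. Normal write-back cacheable encodes as 0xff in its slot: */
    static inline uint64_t mair_with_normal(unsigned int mt_normal_idx)
    {
            return mair_attridx(0xff, mt_normal_idx);
    }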
 	 */
-	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
-			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
-			TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
-	tcr_clear_errata_bits x10, x9, x5
+	mair	.req	x17
+	tcr	.req	x16
+	tcr2	.req	x15
+	mov_q	mair, MAIR_EL1_SET
+	mov_q	tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
+		     TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
+		     TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
+	mov	tcr2, xzr
+
+	tcr_clear_errata_bits tcr, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52
-	ldr_l	x9, vabits_actual
-	sub	x9, xzr, x9
-	add	x9, x9, #64
-	tcr_set_t1sz	x10, x9
-#else
-	ldr_l	x9, idmap_t0sz
+	mov	x9, #64 - VA_BITS
+alternative_if ARM64_HAS_VA52
+	tcr_set_t1sz	tcr, x9
+#ifdef CONFIG_ARM64_LPA2
+	orr	tcr, tcr, #TCR_DS
+#endif
+alternative_else_nop_endif
 #endif
-	tcr_set_t0sz	x10, x9
 
 	/*
 	 * Set the IPS bits in TCR_EL1.
 	 */
-	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
+	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
 #ifdef CONFIG_ARM64_HW_AFDBM
 	/*
 	 * Enable hardware update of the Access Flags bit.
@@ -468,11 +495,46 @@ ENTRY(__cpu_setup)
 	 * via capabilities.
 	 */
 	mrs	x9, ID_AA64MMFR1_EL1
-	and	x9, x9, #0xf
+	ubfx	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, #4
 	cbz	x9, 1f
-	orr	x10, x10, #TCR_HA		// hardware Access flag update
+	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
+#ifdef CONFIG_ARM64_HAFT
+	cmp	x9, ID_AA64MMFR1_EL1_HAFDBS_HAFT
+	b.lt	1f
+	orr	tcr2, tcr2, TCR2_EL1_HAFT
+#endif /* CONFIG_ARM64_HAFT */
 1:
 #endif	/* CONFIG_ARM64_HW_AFDBM */
-	msr	tcr_el1, x10
+	msr	mair_el1, mair
+	msr	tcr_el1, tcr
+
+	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
+	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
+	cbz	x1, .Lskip_indirection
+
+	mov_q	x0, PIE_E0_ASM
+	msr	REG_PIRE0_EL1, x0
+	mov_q	x0, PIE_E1_ASM
+	msr	REG_PIR_EL1, x0
+
+	orr	tcr2, tcr2, TCR2_EL1_PIE
+
+.Lskip_indirection:
+
+	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
+	ubfx	x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4
+	cbz	x1, 1f
+	msr	REG_TCR2_EL1, tcr2
1:
+
+	/*
+	 * Prepare SCTLR
+	 */
+	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
 	ret					// return to head.S
-ENDPROC(__cpu_setup)
+
+	.unreq	mair
+	.unreq	tcr
+	.unreq	tcr2
+SYM_FUNC_END(__cpu_setup)
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
new file mode 100644
index 000000000000..421a5de806c6
--- /dev/null
+++ b/arch/arm64/mm/ptdump.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Debug helper to dump the current kernel pagetables of the system
+ * so that we can see what the various memory ranges are set to.
+ *
+ * Derived from x86 and arm implementation:
+ * (C) Copyright 2008 Intel Corporation
+ *
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/ptdump.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+
+#include <asm/fixmap.h>
+#include <asm/kasan.h>
+#include <asm/memory.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/ptdump.h>
+
+
+#define pt_dump_seq_printf(m, fmt, args...)
\ +({ \ + if (m) \ + seq_printf(m, fmt, ##args); \ +}) + +#define pt_dump_seq_puts(m, fmt) \ +({ \ + if (m) \ + seq_printf(m, fmt); \ +}) + +static const struct ptdump_prot_bits pte_bits[] = { + { + .mask = PTE_VALID, + .val = PTE_VALID, + .set = " ", + .clear = "F", + }, { + .mask = PTE_USER, + .val = PTE_USER, + .set = "USR", + .clear = " ", + }, { + .mask = PTE_RDONLY, + .val = PTE_RDONLY, + .set = "ro", + .clear = "RW", + }, { + .mask = PTE_PXN, + .val = PTE_PXN, + .set = "NX", + .clear = "x ", + }, { + .mask = PTE_SHARED, + .val = PTE_SHARED, + .set = "SHD", + .clear = " ", + }, { + .mask = PTE_AF, + .val = PTE_AF, + .set = "AF", + .clear = " ", + }, { + .mask = PTE_NG, + .val = PTE_NG, + .set = "NG", + .clear = " ", + }, { + .mask = PTE_CONT, + .val = PTE_CONT, + .set = "CON", + .clear = " ", + }, { + .mask = PMD_TYPE_MASK, + .val = PMD_TYPE_SECT, + .set = "BLK", + .clear = " ", + }, { + .mask = PTE_UXN, + .val = PTE_UXN, + .set = "UXN", + .clear = " ", + }, { + .mask = PTE_GP, + .val = PTE_GP, + .set = "GP", + .clear = " ", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_DEVICE_nGnRnE), + .set = "DEVICE/nGnRnE", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_DEVICE_nGnRE), + .set = "DEVICE/nGnRE", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_NORMAL_NC), + .set = "MEM/NORMAL-NC", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_NORMAL), + .set = "MEM/NORMAL", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_NORMAL_TAGGED), + .set = "MEM/NORMAL-TAGGED", + } +}; + +static struct ptdump_pg_level kernel_pg_levels[] __ro_after_init = { + { /* pgd */ + .name = "PGD", + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* p4d */ + .name = "P4D", + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* pud */ + .name = "PUD", + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* pmd */ + .name = "PMD", + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* pte */ + .name = "PTE", + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, +}; + +static void dump_prot(struct ptdump_pg_state *st, const struct ptdump_prot_bits *bits, + size_t num) +{ + unsigned i; + + for (i = 0; i < num; i++, bits++) { + const char *s; + + if ((st->current_prot & bits->mask) == bits->val) + s = bits->set; + else + s = bits->clear; + + if (s) + pt_dump_seq_printf(st->seq, " %s", s); + } +} + +static void note_prot_uxn(struct ptdump_pg_state *st, unsigned long addr) +{ + if (!st->check_wx) + return; + + if ((st->current_prot & PTE_UXN) == PTE_UXN) + return; + + WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n", + (void *)st->start_address, (void *)st->start_address); + + st->uxn_pages += (addr - st->start_address) / PAGE_SIZE; +} + +static void note_prot_wx(struct ptdump_pg_state *st, unsigned long addr) +{ + if (!st->check_wx) + return; + if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY) + return; + if ((st->current_prot & PTE_PXN) == PTE_PXN) + return; + + WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n", + (void *)st->start_address, (void *)st->start_address); + + st->wx_pages += (addr - st->start_address) / PAGE_SIZE; +} + +void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, + pteval_t val) +{ + struct ptdump_pg_state *st = container_of(pt_st, struct ptdump_pg_state, ptdump); + struct ptdump_pg_level *pg_level = st->pg_level; + static const char units[] = "KMGTPE"; + ptdesc_t prot = 0; + + /* check if the current level has been folded dynamically */ + if (st->mm 
&& ((level == 1 && mm_p4d_folded(st->mm)) || + (level == 2 && mm_pud_folded(st->mm)))) + level = 0; + + if (level >= 0) + prot = val & pg_level[level].mask; + + if (st->level == -1) { + st->level = level; + st->current_prot = prot; + st->start_address = addr; + pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } else if (prot != st->current_prot || level != st->level || + addr >= st->marker[1].start_address) { + const char *unit = units; + unsigned long delta; + + if (st->current_prot) { + note_prot_uxn(st, addr); + note_prot_wx(st, addr); + } + + pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ", + st->start_address, addr); + + delta = (addr - st->start_address) >> 10; + while (!(delta & 1023) && unit[1]) { + delta >>= 10; + unit++; + } + pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit, + pg_level[st->level].name); + if (st->current_prot && pg_level[st->level].bits) + dump_prot(st, pg_level[st->level].bits, + pg_level[st->level].num); + pt_dump_seq_puts(st->seq, "\n"); + + if (addr >= st->marker[1].start_address) { + st->marker++; + pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } + + st->start_address = addr; + st->current_prot = prot; + st->level = level; + } + + if (addr >= st->marker[1].start_address) { + st->marker++; + pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } + +} + +void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte) +{ + note_page(pt_st, addr, 4, pte_val(pte)); +} + +void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd) +{ + note_page(pt_st, addr, 3, pmd_val(pmd)); +} + +void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud) +{ + note_page(pt_st, addr, 2, pud_val(pud)); +} + +void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d) +{ + note_page(pt_st, addr, 1, p4d_val(p4d)); +} + +void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd) +{ + note_page(pt_st, addr, 0, pgd_val(pgd)); +} + +void note_page_flush(struct ptdump_state *pt_st) +{ + pte_t pte_zero = {0}; + + note_page(pt_st, 0, -1, pte_val(pte_zero)); +} + +void ptdump_walk(struct seq_file *s, struct ptdump_info *info) +{ + unsigned long end = ~0UL; + struct ptdump_pg_state st; + + if (info->base_addr < TASK_SIZE_64) + end = TASK_SIZE_64; + + st = (struct ptdump_pg_state){ + .seq = s, + .marker = info->markers, + .mm = info->mm, + .pg_level = &kernel_pg_levels[0], + .level = -1, + .ptdump = { + .note_page_pte = note_page_pte, + .note_page_pmd = note_page_pmd, + .note_page_pud = note_page_pud, + .note_page_p4d = note_page_p4d, + .note_page_pgd = note_page_pgd, + .note_page_flush = note_page_flush, + .range = (struct ptdump_range[]){ + {info->base_addr, end}, + {0, 0} + } + } + }; + + ptdump_walk_pgd(&st.ptdump, info->mm, NULL); +} + +static void __init ptdump_initialize(void) +{ + unsigned i, j; + + for (i = 0; i < ARRAY_SIZE(kernel_pg_levels); i++) + if (kernel_pg_levels[i].bits) + for (j = 0; j < kernel_pg_levels[i].num; j++) + kernel_pg_levels[i].mask |= kernel_pg_levels[i].bits[j].mask; +} + +static struct ptdump_info kernel_ptdump_info __ro_after_init = { + .mm = &init_mm, +}; + +bool ptdump_check_wx(void) +{ + struct ptdump_pg_state st = { + .seq = NULL, + .marker = (struct addr_marker[]) { + { 0, NULL}, + { -1, NULL}, + }, + .pg_level = &kernel_pg_levels[0], + .level = -1, + .check_wx = true, + .ptdump = { + .note_page_pte = note_page_pte, + .note_page_pmd = note_page_pmd, + .note_page_pud = note_page_pud, + .note_page_p4d = 
note_page_p4d, + .note_page_pgd = note_page_pgd, + .note_page_flush = note_page_flush, + .range = (struct ptdump_range[]) { + {_PAGE_OFFSET(vabits_actual), ~0UL}, + {0, 0} + } + } + }; + + ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); + + if (st.wx_pages || st.uxn_pages) { + pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", + st.wx_pages, st.uxn_pages); + + return false; + } else { + pr_info("Checked W+X mappings: passed, no W+X pages found\n"); + + return true; + } +} + +static int __init ptdump_init(void) +{ + u64 page_offset = _PAGE_OFFSET(vabits_actual); + u64 vmemmap_start = (u64)virt_to_page((void *)page_offset); + struct addr_marker m[] = { + { PAGE_OFFSET, "Linear Mapping start" }, + { PAGE_END, "Linear Mapping end" }, +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) + { KASAN_SHADOW_START, "Kasan shadow start" }, + { KASAN_SHADOW_END, "Kasan shadow end" }, +#endif + { MODULES_VADDR, "Modules start" }, + { MODULES_END, "Modules end" }, + { VMALLOC_START, "vmalloc() area" }, + { VMALLOC_END, "vmalloc() end" }, + { vmemmap_start, "vmemmap start" }, + { VMEMMAP_END, "vmemmap end" }, + { PCI_IO_START, "PCI I/O start" }, + { PCI_IO_END, "PCI I/O end" }, + { FIXADDR_TOT_START, "Fixmap start" }, + { FIXADDR_TOP, "Fixmap end" }, + { -1, NULL }, + }; + static struct addr_marker address_markers[ARRAY_SIZE(m)] __ro_after_init; + + kernel_ptdump_info.markers = memcpy(address_markers, m, sizeof(m)); + kernel_ptdump_info.base_addr = page_offset; + + ptdump_initialize(); + ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); + return 0; +} +device_initcall(ptdump_init); diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c index 064163f25592..68bf1a125502 100644 --- a/arch/arm64/mm/ptdump_debugfs.c +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/debugfs.h> +#include <linux/memory_hotplug.h> #include <linux/seq_file.h> #include <asm/ptdump.h> @@ -7,12 +8,15 @@ static int ptdump_show(struct seq_file *m, void *v) { struct ptdump_info *info = m->private; - ptdump_walk_pgd(m, info); + + get_online_mems(); + ptdump_walk(m, info); + put_online_mems(); return 0; } DEFINE_SHOW_ATTRIBUTE(ptdump); -void ptdump_debugfs_register(struct ptdump_info *info, const char *name) +void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name) { debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); } diff --git a/arch/arm64/mm/trans_pgd-asm.S b/arch/arm64/mm/trans_pgd-asm.S new file mode 100644 index 000000000000..021c31573bcb --- /dev/null +++ b/arch/arm64/mm/trans_pgd-asm.S @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * Copyright (c) 2021, Microsoft Corporation. 
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/kvm_asm.h>
+
+.macro invalid_vector label
+SYM_CODE_START_LOCAL(\label)
+	.align 7
+	b	\label
+SYM_CODE_END(\label)
+.endm
+
+.macro el1_sync_vector
+SYM_CODE_START_LOCAL(el1_sync)
+	.align 7
+	cmp	x0, #HVC_SET_VECTORS	/* Called from hibernate */
+	b.ne	1f
+	msr	vbar_el2, x1
+	mov	x0, xzr
+	eret
+1:	cmp	x0, #HVC_SOFT_RESTART	/* Called from kexec */
+	b.ne	2f
+	mov	x0, x2
+	mov	x2, x4
+	mov	x4, x1
+	mov	x1, x3
+	br	x4
+2:	/* Unexpected argument, set an error */
+	mov_q	x0, HVC_STUB_ERR
+	eret
+SYM_CODE_END(el1_sync)
+.endm
+
+SYM_CODE_START(trans_pgd_stub_vectors)
+	invalid_vector	hyp_stub_el2t_sync_invalid	// Synchronous EL2t
+	invalid_vector	hyp_stub_el2t_irq_invalid	// IRQ EL2t
+	invalid_vector	hyp_stub_el2t_fiq_invalid	// FIQ EL2t
+	invalid_vector	hyp_stub_el2t_error_invalid	// Error EL2t
+
+	invalid_vector	hyp_stub_el2h_sync_invalid	// Synchronous EL2h
+	invalid_vector	hyp_stub_el2h_irq_invalid	// IRQ EL2h
+	invalid_vector	hyp_stub_el2h_fiq_invalid	// FIQ EL2h
+	invalid_vector	hyp_stub_el2h_error_invalid	// Error EL2h
+
+	el1_sync_vector					// Synchronous 64-bit EL1
+	invalid_vector	hyp_stub_el1_irq_invalid	// IRQ 64-bit EL1
+	invalid_vector	hyp_stub_el1_fiq_invalid	// FIQ 64-bit EL1
+	invalid_vector	hyp_stub_el1_error_invalid	// Error 64-bit EL1
+
+	invalid_vector	hyp_stub_32b_el1_sync_invalid	// Synchronous 32-bit EL1
+	invalid_vector	hyp_stub_32b_el1_irq_invalid	// IRQ 32-bit EL1
+	invalid_vector	hyp_stub_32b_el1_fiq_invalid	// FIQ 32-bit EL1
+	invalid_vector	hyp_stub_32b_el1_error_invalid	// Error 32-bit EL1
+	.align 11
+SYM_INNER_LABEL(__trans_pgd_stub_vectors_end, SYM_L_LOCAL)
+SYM_CODE_END(trans_pgd_stub_vectors)
+
+# Check the trans_pgd_stub_vectors didn't overflow
+.org . - (__trans_pgd_stub_vectors_end - trans_pgd_stub_vectors) + SZ_2K
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
new file mode 100644
index 000000000000..18543b603c77
--- /dev/null
+++ b/arch/arm64/mm/trans_pgd.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Transitional page tables for kexec and hibernate
+ *
+ * This file is derived from: arch/arm64/kernel/hibernate.c
+ *
+ * Copyright (c) 2021, Microsoft Corporation.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ *
+ */
+
+/*
+ * Transitional tables are used while the system is transitioning from one
+ * world to another, such as during hibernate restore or a kexec reboot.
+ * During these phases one cannot rely on the current page tables not being
+ * overwritten, because hibernate and kexec can overwrite them mid-transition.
+ */
+
+#include <asm/trans_pgd.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <linux/suspend.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/kfence.h>
+
+static void *trans_alloc(struct trans_pgd_info *info)
+{
+	return info->trans_alloc_page(info->trans_alloc_arg);
+}
+
+static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
+{
+	pte_t pte = __ptep_get(src_ptep);
+
+	if (pte_valid(pte)) {
+		/*
+		 * Resume will overwrite areas that may be marked
+		 * read only (code, rodata). Clear the RDONLY bit from
+		 * the temporary mappings we use during restore.
+		 */
+		__set_pte(dst_ptep, pte_mkwrite_novma(pte));
+	} else if (!pte_none(pte)) {
+		/*
+		 * debug_pagealloc will have removed the PTE_VALID bit if
+		 * the page isn't in use by the resume kernel. 
It may have + * been in use by the original kernel, in which case we need + * to put it back in our copy to do the restore. + * + * Other cases include kfence / vmalloc / memfd_secret which + * may call `set_direct_map_invalid_noflush()`. + * + * Before marking this entry valid, check the pfn should + * be mapped. + */ + BUG_ON(!pfn_valid(pte_pfn(pte))); + + __set_pte(dst_ptep, pte_mkvalid(pte_mkwrite_novma(pte))); + } +} + +static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp, + pmd_t *src_pmdp, unsigned long start, unsigned long end) +{ + pte_t *src_ptep; + pte_t *dst_ptep; + unsigned long addr = start; + + dst_ptep = trans_alloc(info); + if (!dst_ptep) + return -ENOMEM; + pmd_populate_kernel(NULL, dst_pmdp, dst_ptep); + dst_ptep = pte_offset_kernel(dst_pmdp, start); + + src_ptep = pte_offset_kernel(src_pmdp, start); + do { + _copy_pte(dst_ptep, src_ptep, addr); + } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end); + + return 0; +} + +static int copy_pmd(struct trans_pgd_info *info, pud_t *dst_pudp, + pud_t *src_pudp, unsigned long start, unsigned long end) +{ + pmd_t *src_pmdp; + pmd_t *dst_pmdp; + unsigned long next; + unsigned long addr = start; + + if (pud_none(READ_ONCE(*dst_pudp))) { + dst_pmdp = trans_alloc(info); + if (!dst_pmdp) + return -ENOMEM; + pud_populate(NULL, dst_pudp, dst_pmdp); + } + dst_pmdp = pmd_offset(dst_pudp, start); + + src_pmdp = pmd_offset(src_pudp, start); + do { + pmd_t pmd = READ_ONCE(*src_pmdp); + + next = pmd_addr_end(addr, end); + if (pmd_none(pmd)) + continue; + if (pmd_table(pmd)) { + if (copy_pte(info, dst_pmdp, src_pmdp, addr, next)) + return -ENOMEM; + } else { + set_pmd(dst_pmdp, + __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY)); + } + } while (dst_pmdp++, src_pmdp++, addr = next, addr != end); + + return 0; +} + +static int copy_pud(struct trans_pgd_info *info, p4d_t *dst_p4dp, + p4d_t *src_p4dp, unsigned long start, + unsigned long end) +{ + pud_t *dst_pudp; + pud_t *src_pudp; + unsigned long next; + unsigned long addr = start; + + if (p4d_none(READ_ONCE(*dst_p4dp))) { + dst_pudp = trans_alloc(info); + if (!dst_pudp) + return -ENOMEM; + p4d_populate(NULL, dst_p4dp, dst_pudp); + } + dst_pudp = pud_offset(dst_p4dp, start); + + src_pudp = pud_offset(src_p4dp, start); + do { + pud_t pud = READ_ONCE(*src_pudp); + + next = pud_addr_end(addr, end); + if (pud_none(pud)) + continue; + if (pud_table(pud)) { + if (copy_pmd(info, dst_pudp, src_pudp, addr, next)) + return -ENOMEM; + } else { + set_pud(dst_pudp, + __pud(pud_val(pud) & ~PUD_SECT_RDONLY)); + } + } while (dst_pudp++, src_pudp++, addr = next, addr != end); + + return 0; +} + +static int copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp, + pgd_t *src_pgdp, unsigned long start, + unsigned long end) +{ + p4d_t *dst_p4dp; + p4d_t *src_p4dp; + unsigned long next; + unsigned long addr = start; + + if (pgd_none(READ_ONCE(*dst_pgdp))) { + dst_p4dp = trans_alloc(info); + if (!dst_p4dp) + return -ENOMEM; + pgd_populate(NULL, dst_pgdp, dst_p4dp); + } + + dst_p4dp = p4d_offset(dst_pgdp, start); + src_p4dp = p4d_offset(src_pgdp, start); + do { + next = p4d_addr_end(addr, end); + if (p4d_none(READ_ONCE(*src_p4dp))) + continue; + if (copy_pud(info, dst_p4dp, src_p4dp, addr, next)) + return -ENOMEM; + } while (dst_p4dp++, src_p4dp++, addr = next, addr != end); + + return 0; +} + +static int copy_page_tables(struct trans_pgd_info *info, pgd_t *dst_pgdp, + unsigned long start, unsigned long end) +{ + unsigned long next; + unsigned long addr = start; + pgd_t *src_pgdp = pgd_offset_k(start); + + 
dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(READ_ONCE(*src_pgdp)))
+			continue;
+		if (copy_p4d(info, dst_pgdp, src_pgdp, addr, next))
+			return -ENOMEM;
+	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
+
+	return 0;
+}
+
+/*
+ * Create a trans_pgd and copy the linear map.
+ * info:	contains allocator and its argument
+ * dst_pgdp:	new page table that is created, and to which map is copied.
+ * start:	Start of the interval (inclusive).
+ * end:		End of the interval (exclusive).
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ */
+int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp,
+			  unsigned long start, unsigned long end)
+{
+	int rc;
+	pgd_t *trans_pgd = trans_alloc(info);
+
+	if (!trans_pgd) {
+		pr_err("Failed to allocate memory for temporary page tables.\n");
+		return -ENOMEM;
+	}
+
+	rc = copy_page_tables(info, trans_pgd, start, end);
+	if (!rc)
+		*dst_pgdp = trans_pgd;
+
+	return rc;
+}
+
+/*
+ * The page we want to idmap may be outside the range covered by VA_BITS that
+ * can be built using the kernel's p?d_populate() helpers. As a one off, for a
+ * single page, we build these page tables bottom up and just assume that it
+ * will need the maximum T0SZ.
+ *
+ * Returns 0 on success, and -ENOMEM on failure.
+ * On success, trans_ttbr0 contains the page table with the idmapped page, and
+ * t0sz is set to the maximum T0SZ for this page.
+ */
+int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
+			 unsigned long *t0sz, void *page)
+{
+	phys_addr_t dst_addr = virt_to_phys(page);
+	unsigned long pfn = __phys_to_pfn(dst_addr);
+	int max_msb = (dst_addr & GENMASK(52, 48)) ? 51 : 47;
+	int bits_mapped = PAGE_SHIFT - 4;
+	unsigned long level_mask, prev_level_entry, *levels[4];
+	int this_level, index, level_lsb, level_msb;
+
+	dst_addr &= PAGE_MASK;
+	prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_ROX));
+
+	for (this_level = 3; this_level >= 0; this_level--) {
+		levels[this_level] = trans_alloc(info);
+		if (!levels[this_level])
+			return -ENOMEM;
+
+		level_lsb = ARM64_HW_PGTABLE_LEVEL_SHIFT(this_level);
+		level_msb = min(level_lsb + bits_mapped, max_msb);
+		level_mask = GENMASK_ULL(level_msb, level_lsb);
+
+		index = (dst_addr & level_mask) >> level_lsb;
+		*(levels[this_level] + index) = prev_level_entry;
+
+		pfn = virt_to_pfn(levels[this_level]);
+		prev_level_entry = pte_val(pfn_pte(pfn,
+						   __pgprot(PMD_TYPE_TABLE)));
+
+		if (level_msb == max_msb)
+			break;
+	}
+
+	*trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn));
+	*t0sz = TCR_T0SZ(max_msb + 1);
+
+	return 0;
+}
+
+/*
+ * Create a copy of the vector table so we can call HVC_SET_VECTORS or
+ * HVC_SOFT_RESTART from contexts where the table may be overwritten.
+ */
+int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info,
+			       phys_addr_t *el2_vectors)
+{
+	void *hyp_stub = trans_alloc(info);
+
+	if (!hyp_stub)
+		return -ENOMEM;
+	*el2_vectors = virt_to_phys(hyp_stub);
+	memcpy(hyp_stub, &trans_pgd_stub_vectors, ARM64_VECTOR_TABLE_LEN);
+	caches_clean_inval_pou((unsigned long)hyp_stub,
+			       (unsigned long)hyp_stub +
+			       ARM64_VECTOR_TABLE_LEN);
+	dcache_clean_inval_poc((unsigned long)hyp_stub,
+			       (unsigned long)hyp_stub +
+			       ARM64_VECTOR_TABLE_LEN);
+
+	return 0;
+}
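Putting the pieces together, a caller supplies only the page allocator; a usage sketch in the spirit of the hibernate and kexec callers (allocator callback and GFP choice are illustrative):

    /* Usage sketch: allocation policy stays with the caller. */
    static void *tmp_page_alloc(void *arg)
    {
            return (void *)get_zeroed_page(GFP_ATOMIC);
    }

    static int build_transition_tables(pgd_t **tmp_pgd)
    {
            struct trans_pgd_info info = {
                    .trans_alloc_page       = tmp_page_alloc,
                    .trans_alloc_arg        = NULL,
            };

            /* Copy the linear map so the transition code keeps working
             * even after the original tables are overwritten. */
            return trans_pgd_create_copy(&info, tmp_pgd,
                                         PAGE_OFFSET, PAGE_END);
    }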