Diffstat (limited to 'arch/arm64/include/asm/mmu_context.h')
-rw-r--r--	arch/arm64/include/asm/mmu_context.h	256
1 file changed, 176 insertions(+), 80 deletions(-)
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 3257895a9b5e..cc80af59c69e 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -1,42 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/mmu_context.h
  *
  * Copyright (C) 1996 Russell King.
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_MMU_CONTEXT_H
 #define __ASM_MMU_CONTEXT_H
 
-#define FALKOR_RESERVED_ASID 1
-
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 
 #include <linux/compiler.h>
 #include <linux/sched.h>
 #include <linux/sched/hotplug.h>
 #include <linux/mm_types.h>
+#include <linux/pgtable.h>
+#include <linux/pkeys.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
+#include <asm/daifflags.h>
+#include <asm/gcs.h>
 #include <asm/proc-fns.h>
-#include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
-#include <asm/pgtable.h>
 #include <asm/sysreg.h>
 #include <asm/tlbflush.h>
 
+extern bool rodata_full;
+
 static inline void contextidr_thread_switch(struct task_struct *next)
 {
 	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
@@ -47,49 +38,45 @@ static inline void contextidr_thread_switch(struct task_struct *next)
 }
 
 /*
- * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
+ * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
  */
-static inline void cpu_set_reserved_ttbr0(void)
+static inline void cpu_set_reserved_ttbr0_nosync(void)
 {
-	unsigned long ttbr = __pa_symbol(empty_zero_page);
+	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 
 	write_sysreg(ttbr, ttbr0_el1);
+}
+
+static inline void cpu_set_reserved_ttbr0(void)
+{
+	cpu_set_reserved_ttbr0_nosync();
 	isb();
 }
 
-/*
- * TCR.T0SZ value to use when the ID map is active. Usually equals
- * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
- * physical memory, in which case it will be smaller.
- */
-extern u64 idmap_t0sz;
+void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
 
-static inline bool __cpu_uses_extended_idmap(void)
+static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
 {
-	return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
-		unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
+	BUG_ON(pgd == swapper_pg_dir);
+	cpu_do_switch_mm(virt_to_phys(pgd), mm);
 }
 
 /*
- * Set TCR.T0SZ to its default value (based on VA_BITS)
+ * Ensure TCR.T0SZ is set to the provided value.
  */
 static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
 {
-	unsigned long tcr;
+	unsigned long tcr = read_sysreg(tcr_el1);
 
-	if (!__cpu_uses_extended_idmap())
+	if ((tcr & TCR_EL1_T0SZ_MASK) == t0sz)
 		return;
 
-	tcr = read_sysreg(tcr_el1);
-	tcr &= ~TCR_T0SZ_MASK;
-	tcr |= t0sz << TCR_T0SZ_OFFSET;
+	tcr &= ~TCR_EL1_T0SZ_MASK;
+	tcr |= t0sz;
 	write_sysreg(tcr, tcr_el1);
 	isb();
 }
 
-#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
-#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)
-
 /*
  * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
  *
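
For readers following the T0SZ arithmetic in the hunks above: TCR_EL1.T0SZ holds
64 minus the number of virtual-address bits, and the field sits at bits [5:0],
which is why the rewritten __cpu_set_tcr_t0sz() can mask, compare and OR the
value in without shifting. A minimal standalone sketch of that encoding (the
macro and function names below are illustrative stand-ins, not the kernel's):

	#include <stdio.h>

	/* Stand-in for TCR_EL1_T0SZ_MASK: T0SZ occupies bits [5:0]. */
	#define T0SZ_MASK	0x3fUL

	/* TCR_T0SZ(va_bits) boils down to 64 - va_bits, already in place. */
	static unsigned long t0sz_for(unsigned int va_bits)
	{
		return (64UL - va_bits) & T0SZ_MASK;
	}

	int main(void)
	{
		printf("T0SZ for 48-bit VAs: %lu\n", t0sz_for(48));	/* 16 */
		printf("T0SZ for 39-bit VAs: %lu\n", t0sz_for(39));	/* 25 */
		printf("T0SZ for 52-bit VAs: %lu\n", t0sz_for(52));	/* 12 */
		return 0;
	}
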
@@ -108,7 +95,7 @@ static inline void cpu_uninstall_idmap(void)
 
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
-	cpu_set_default_tcr_t0sz();
+	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual));
 
 	if (mm != &init_mm && !system_uses_ttbr0_pan())
 		cpu_switch_mm(mm->pgd, mm);
@@ -118,28 +105,50 @@ static inline void cpu_install_idmap(void)
 {
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
-	cpu_set_idmap_tcr_t0sz();
+	__cpu_set_tcr_t0sz(TCR_T0SZ(IDMAP_VA_BITS));
 
 	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
 }
 
 /*
- * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
- * avoiding the possibility of conflicting TLB entries being allocated.
+ * Load our new page tables. A strict BBM approach requires that we ensure that
+ * TLBs are free of any entries that may overlap with the global mappings we are
+ * about to install.
+ *
+ * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
+ * services), while for a userspace-driven test_resume cycle it points to
+ * userspace page tables (and we must point it at a zero page ourselves).
+ *
+ * We change T0SZ as part of installing the idmap. This is undone by
+ * cpu_uninstall_idmap() in __cpu_suspend_exit().
  */
-static inline void cpu_replace_ttbr1(pgd_t *pgd)
+static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
 {
-	typedef void (ttbr_replace_func)(phys_addr_t);
-	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
-	ttbr_replace_func *replace_phys;
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	__cpu_set_tcr_t0sz(t0sz);
 
-	phys_addr_t pgd_phys = virt_to_phys(pgd);
+	/* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
+	write_sysreg(ttbr0, ttbr0_el1);
+	isb();
+}
 
-	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
+void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp);
 
-	cpu_install_idmap();
-	replace_phys(pgd_phys);
-	cpu_uninstall_idmap();
+static inline void cpu_enable_swapper_cnp(void)
+{
+	__cpu_replace_ttbr1(lm_alias(swapper_pg_dir), true);
+}
+
+static inline void cpu_replace_ttbr1(pgd_t *pgdp)
+{
+	/*
+	 * Only for early TTBR1 replacement before cpucaps are finalized and
+	 * before we've decided whether to use CNP.
	 */
+	WARN_ON(system_capabilities_finalized());
+	__cpu_replace_ttbr1(pgdp, false);
 }
 
 /*
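
The deleted inline body of cpu_replace_ttbr1() remains the clearest
illustration of why cpu_install_idmap() and cpu_uninstall_idmap() come as a
pair: the TTBR1_EL1 swap has to run from an idmapped physical alias of the
trampoline so the PC stays valid while the kernel page tables are in flux.
A sketch of that pattern, reassembled from the '-' lines of the hunk above
(the wrapper name is hypothetical; the modern kernel keeps this logic out of
line in __cpu_replace_ttbr1()):

	static void example_replace_kernel_pgd(pgd_t *pgdp)
	{
		typedef void (ttbr_replace_func)(phys_addr_t);
		extern ttbr_replace_func idmap_cpu_replace_ttbr1;
		ttbr_replace_func *replace_phys;

		/* Call the trampoline through its physical alias. */
		replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

		cpu_install_idmap();	/* 1:1 mapping keeps the PC valid */
		replace_phys(virt_to_phys(pgdp));
		cpu_uninstall_idmap();	/* restore TTBR0/T0SZ for the active mm */
	}
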
@@ -151,22 +160,41 @@ static inline void cpu_replace_ttbr1(pgd_t *pgd)
  * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
  * take CPU migration into account.
  */
-#define destroy_context(mm)		do { } while(0)
-void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
+void check_and_switch_context(struct mm_struct *mm);
 
-#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
+#define init_new_context(tsk, mm) init_new_context(tsk, mm)
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	atomic64_set(&mm->context.id, 0);
+	refcount_set(&mm->context.pinned, 0);
 
-/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm:  describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+	/* pkey 0 is the default, so always reserve it. */
+	mm->context.pkey_allocation_map = BIT(0);
+
+	return 0;
+}
+
+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
+				  struct mm_struct *mm)
+{
+	/* Duplicate the oldmm pkey state in mm: */
+	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
+}
+
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+	arch_dup_pkeys(oldmm, mm);
+
+	return 0;
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+			      unsigned long start, unsigned long end)
 {
 }
 
@@ -174,11 +202,17 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
 {
-	if (system_uses_ttbr0_pan()) {
-		BUG_ON(mm->pgd == swapper_pg_dir);
-		task_thread_info(tsk)->ttbr0 =
-			virt_to_phys(mm->pgd) | ASID(mm) << 48;
-	}
+	u64 ttbr;
+
+	if (!system_uses_ttbr0_pan())
+		return;
+
+	if (mm == &init_mm)
+		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
+	else
+		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
+
+	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }
 #else
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -187,10 +221,19 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 }
 #endif
 
-static inline void __switch_mm(struct mm_struct *next)
+#define enter_lazy_tlb enter_lazy_tlb
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-	unsigned int cpu = smp_processor_id();
+	/*
+	 * We don't actually care about the ttbr0 mapping, so point it at the
+	 * zero page.
+	 */
+	update_saved_ttbr0(tsk, &init_mm);
+}
 
+static inline void __switch_mm(struct mm_struct *next)
+{
 	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -200,7 +243,7 @@ static inline void __switch_mm(struct mm_struct *next)
		return;
	}
 
-	check_and_switch_context(next, cpu);
+	check_and_switch_context(next);
 }
 
 static inline void
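
On the ASID tagging used by update_saved_ttbr0() in the hunks above: with
software PAN, the per-thread shadow TTBR0 packs the pgd's physical address
into the low bits and the 16-bit ASID into bits [63:48]. A standalone
illustration of that layout (it assumes phys_to_ttbr() is an identity
transform, i.e. it ignores 52-bit PA folding, and the addresses are made up):

	#include <stdint.h>
	#include <stdio.h>

	#define TTBR_ASID_SHIFT	48	/* TTBR0_EL1.ASID: bits [63:48] */

	/* Mirrors 'virt_to_phys(mm->pgd) | ASID(mm) << 48' from above. */
	static uint64_t saved_ttbr0(uint64_t pgd_phys, uint16_t asid)
	{
		return pgd_phys | ((uint64_t)asid << TTBR_ASID_SHIFT);
	}

	int main(void)
	{
		/* Hypothetical pgd at PA 0x40001000, tagged with ASID 42. */
		printf("ttbr0 = 0x%016llx\n",
		       (unsigned long long)saved_ttbr0(0x40001000ULL, 42));
		return 0;
	}
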
@@ -214,18 +257,71 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may have not been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
-	 * of another thread of the same process). Avoid setting the reserved
-	 * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+	 * of another thread of the same process).
	 */
-	if (next != &init_mm)
-		update_saved_ttbr0(tsk, next);
+	update_saved_ttbr0(tsk, next);
 }
 
-#define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, current)
+static inline const struct cpumask *
+__task_cpu_possible_mask(struct task_struct *p, const struct cpumask *mask)
+{
+	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
+		return mask;
+
+	if (!is_compat_thread(task_thread_info(p)))
+		return mask;
+
+	return system_32bit_el0_cpumask();
+}
+
+static inline const struct cpumask *
+task_cpu_possible_mask(struct task_struct *p)
+{
+	return __task_cpu_possible_mask(p, cpu_possible_mask);
+}
+#define task_cpu_possible_mask	task_cpu_possible_mask
+
+const struct cpumask *task_cpu_fallback_mask(struct task_struct *p);
 
 void verify_cpu_asid_bits(void);
+void post_ttbr_update_workaround(void);
+
+unsigned long arm64_mm_context_get(struct mm_struct *mm);
+void arm64_mm_context_put(struct mm_struct *mm);
+
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+	return -1UL >> 8;
+}
+
+/*
+ * Only enforce protection keys on the current process, because there is no
+ * user context to access POR_EL0 for another address space.
+ */
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+		bool write, bool execute, bool foreign)
+{
+	if (!system_supports_poe())
+		return true;
+
+	/* allow access if the VMA is not one from this process */
+	if (foreign || vma_is_foreign(vma))
+		return true;
+
+	return por_el0_allows_pkey(vma_pkey(vma), write, execute);
+}
+
+#define deactivate_mm deactivate_mm
+static inline void deactivate_mm(struct task_struct *tsk,
+		struct mm_struct *mm)
+{
+	gcs_free(tsk);
+}
+
+#include <asm-generic/mmu_context.h>
 
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
 
 #endif /* !__ASM_MMU_CONTEXT_H */
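
A closing note on mm_untag_mask() above: -1UL >> 8 evaluates to
0x00ffffffffffffff, i.e. a mask that strips the top byte of a pointer,
matching arm64's Top Byte Ignore (TBI) rule for tagged user addresses.
A standalone check (the tagged pointer value is made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t untag_mask = ~0ULL >> 8;	/* == -1UL >> 8 on 64-bit */
		uint64_t tagged_ptr = 0x2a00ffff9000f000ULL; /* tag 0x2a in [63:56] */

		printf("mask     = 0x%016llx\n", (unsigned long long)untag_mask);
		printf("untagged = 0x%016llx\n",
		       (unsigned long long)(tagged_ptr & untag_mask));
		return 0;
	}
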
