Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
| -rw-r--r-- | arch/arm/include/asm/mmu_context.h | 80 |
1 file changed, 51 insertions, 29 deletions
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index b5792b7fd8d3..db2cb06aa8cf 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  arch/arm/include/asm/mmu_context.h
  *
  *  Copyright (C) 1996 Russell King.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  *  Changelog:
  *   27-06-1996	RMK	Created
  */
@@ -15,6 +12,9 @@
 
 #include <linux/compiler.h>
 #include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/proc-fns.h>
@@ -23,10 +23,27 @@
 
 void __check_vmalloc_seq(struct mm_struct *mm);
 
+#ifdef CONFIG_MMU
+static inline void check_vmalloc_seq(struct mm_struct *mm)
+{
+	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
+	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
+		     atomic_read(&init_mm.context.vmalloc_seq)))
+		__check_vmalloc_seq(mm);
+}
+#endif
+
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
+
+#define init_new_context init_new_context
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	atomic64_set(&mm->context.id, 0);
+	return 0;
+}
 
 #ifdef CONFIG_ARM_ERRATA_798181
 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
@@ -45,8 +62,7 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
-		__check_vmalloc_seq(mm);
+	check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
@@ -56,45 +72,42 @@ static inline void check_and_switch_context(struct mm_struct *mm,
 		 * on non-ASID CPUs, the old mm will remain valid until the
 		 * finish_arch_post_lock_switch() call.
 		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+		mm->context.switch_pending = 1;
 	else
 		cpu_switch_mm(mm->pgd, mm);
 }
 
+#ifndef MODULE
 #define finish_arch_post_lock_switch \
 	finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)
 {
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
-		struct mm_struct *mm = current->mm;
-		cpu_switch_mm(mm->pgd, mm);
+	struct mm_struct *mm = current->mm;
+
+	if (mm && mm->context.switch_pending) {
+		/*
+		 * Preemption must be disabled during cpu_switch_mm() as we
+		 * have some stateful cache flush implementations. Check
+		 * switch_pending again in case we were preempted and the
+		 * switch to this mm was already done.
+		 */
+		preempt_disable();
+		if (mm->context.switch_pending) {
+			mm->context.switch_pending = 0;
+			cpu_switch_mm(mm->pgd, mm);
+		}
+		preempt_enable_no_resched();
 	}
 }
+#endif /* !MODULE */
 
 #endif	/* CONFIG_MMU */
 
-#define init_new_context(tsk,mm)	0
-
 #endif	/* CONFIG_CPU_HAS_ASID */
 
-#define destroy_context(mm)		do { } while(0)
 #define activate_mm(prev,next)		switch_mm(prev, next, NULL)
 
 /*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm:  describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-/*
  * This is the actual mm switch as far as the scheduler
  * is concerned.  No registers are touched.  We avoid
  *	calling the CPU specific function when the mm hasn't
@@ -125,6 +138,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #endif
 }
 
-#define deactivate_mm(tsk,mm)	do { } while (0)
+#ifdef CONFIG_VMAP_STACK
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+	if (mm != &init_mm)
+		check_vmalloc_seq(mm);
+}
+#define enter_lazy_tlb enter_lazy_tlb
+#endif
+
+#include <asm-generic/mmu_context.h>
 
 #endif
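
For context on the hook conversions above: the macros init_new_context(), destroy_context(), deactivate_mm() and the old enter_lazy_tlb() stub disappear because <asm-generic/mmu_context.h> supplies no-op defaults for every hook the architecture leaves undefined, and an architecture opts out of a default by providing its own static inline plus a same-named #define before including the generic header. Below is a minimal, compilable sketch of that override pattern; the simplified prototypes and the puts() call are illustrative only, not the real kernel interfaces.

#include <stdio.h>

/* "arch" side: real hook, plus a marker the preprocessor can test for */
#define init_new_context init_new_context
static inline int init_new_context(void)
{
	puts("arch-specific init_new_context");
	return 0;
}

/* "generic" side: fall back to a no-op only if the arch stayed silent */
#ifndef init_new_context
static inline int init_new_context(void)
{
	return 0;
}
#endif

#ifndef destroy_context
static inline void destroy_context(void)
{
	/* default: nothing to do */
}
#endif

int main(void)
{
	init_new_context();	/* resolves to the arch version */
	destroy_context();	/* resolves to the generic no-op */
	return 0;
}

The self-referencing #define is what makes the scheme work: a plain function definition is invisible to #ifndef, so each overridden hook also defines a macro of the same name, exactly as the patch does for init_new_context and enter_lazy_tlb.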
