path: root/arch/arm/include/asm/mmu_context.h
author     Nicholas Piggin <npiggin@gmail.com>  2020-09-02 00:15:20 +1000
committer  Arnd Bergmann <arnd@arndb.de>        2020-10-26 16:45:06 +0100
commit     292f70d7cdd506e21d12de650f626b95d31b7ec7 (patch)
tree       f7cc30e4a44440d72f35a5361bfa529ebbb87918 /arch/arm/include/asm/mmu_context.h
parent     75e6a851d696218a446028ea10339f3039e47a35 (diff)
arm: use asm-generic/mmu_context.h for no-op implementations
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
-rw-r--r--  arch/arm/include/asm/mmu_context.h  26
1 file changed, 3 insertions, 23 deletions
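Note on the mechanism: the generic header this patch switches to supplies default no-op hooks that an architecture can override one at a time. The sketch below illustrates that override pattern only; it is not the verbatim contents of include/asm-generic/mmu_context.h. It shows why the ASID configuration merely adds "#define init_new_context init_new_context" to keep its own implementation, while the no-op init_new_context, destroy_context, enter_lazy_tlb and deactivate_mm definitions can simply be deleted from the arm header.

/*
 * Illustrative sketch (assumed pattern, not the exact kernel header):
 * each hook is only provided here if the architecture has not already
 * claimed it by defining the macro name to itself before including
 * <asm-generic/mmu_context.h>.
 */
#ifndef enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
				   struct task_struct *tsk)
{
}
#endif

#ifndef init_new_context
static inline int init_new_context(struct task_struct *tsk,
				    struct mm_struct *mm)
{
	return 0;
}
#endif

#ifndef destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
}
#endif

#ifndef deactivate_mm
static inline void deactivate_mm(struct task_struct *tsk,
				 struct mm_struct *mm)
{
}
#endif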
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index f99ed524fe41..84e58956fcab 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -26,6 +26,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -92,33 +94,11 @@ static inline void finish_arch_post_lock_switch(void)
#endif /* CONFIG_MMU */
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
- return 0;
-}
-
-
#endif /* CONFIG_CPU_HAS_ASID */
-#define destroy_context(mm) do { } while(0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm: describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-/*
* This is the actual mm switch as far as the scheduler
* is concerned. No registers are touched. We avoid
* calling the CPU specific function when the mm hasn't
@@ -149,6 +129,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}
-#define deactivate_mm(tsk,mm) do { } while (0)
+#include <asm-generic/mmu_context.h>
#endif