Diffstat (limited to 'arch/sparc/include/asm/mmu_context_64.h')
-rw-r--r--	arch/sparc/include/asm/mmu_context_64.h	78
1 file changed, 69 insertions, 9 deletions
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 87841d687f8d..78bbacc14d2d 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -1,19 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __SPARC64_MMU_CONTEXT_H
 #define __SPARC64_MMU_CONTEXT_H
 
 /* Derived heavily from Linus's Alpha/AXP ASN code... */
 
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 
 #include <linux/spinlock.h>
 #include <linux/mm_types.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
 
 #include <asm/spitfire.h>
+#include <asm/adi_64.h>
 #include <asm-generic/mm_hooks.h>
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
+#include <asm/percpu.h>
 
 extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
@@ -21,7 +22,10 @@ extern unsigned long mmu_context_bmap[];
 
 DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
+
+#define init_new_context init_new_context
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+#define destroy_context destroy_context
 void destroy_context(struct mm_struct *mm);
 
 void __tsb_context_switch(unsigned long pgd_pa,
@@ -89,7 +93,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 
 	/* We have to be extremely careful here or else we will miss
 	 * a TSB grow if we switch back and forth between a kernel
-	 * thread and an address space which has it's TSB size increased
+	 * thread and an address space which has its TSB size increased
	 * on another processor.
	 *
	 * It is possible to play some games in order to optimize the
@@ -114,7 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
	 *
	 * At that point cpu0 continues to use a stale TSB, the one from
	 * before the TSB grow performed on cpu1. cpu1 did not cross-call
-	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
+	 * cpu0 to update its TSB because at that point the cpu_vm_mask
	 * only had cpu1 set in it.
	 */
	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
@@ -131,8 +135,64 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-#define deactivate_mm(tsk,mm)	do { } while (0)
 #define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
-#endif /* !(__ASSEMBLY__) */
+
+#define __HAVE_ARCH_START_CONTEXT_SWITCH
+static inline void arch_start_context_switch(struct task_struct *prev)
+{
+	/* Save the current state of MCDPER register for the process
+	 * we are switching from
+	 */
+	if (adi_capable()) {
+		register unsigned long tmp_mcdper;
+
+		__asm__ __volatile__(
+			".word 0x83438000\n\t"	/* rd %mcdper, %g1 */
+			"mov %%g1, %0\n\t"
+			: "=r" (tmp_mcdper)
+			:
+			: "g1");
+		if (tmp_mcdper)
+			set_tsk_thread_flag(prev, TIF_MCDPER);
+		else
+			clear_tsk_thread_flag(prev, TIF_MCDPER);
+	}
+}
+
+#define finish_arch_post_lock_switch	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	/* Restore the state of MCDPER register for the new process
+	 * just switched to.
+	 */
+	if (adi_capable()) {
+		register unsigned long tmp_mcdper;
+
+		tmp_mcdper = test_thread_flag(TIF_MCDPER);
+		__asm__ __volatile__(
+			"mov %0, %%g1\n\t"
+			".word 0x9d800001\n\t"	/* wr %g0, %g1, %mcdper" */
+			".word 0xaf902001\n\t"	/* wrpr %g0, 1, %pmcdper */
+			:
+			: "ir" (tmp_mcdper)
+			: "g1");
+		if (current && current->mm && current->mm->context.adi) {
+			struct pt_regs *regs;
+
+			regs = task_pt_regs(current);
+			regs->tstate |= TSTATE_MCDE;
+		}
+	}
+}
+
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+	return -1UL >> adi_nbits();
+}
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* !(__ASSEMBLER__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
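Editor's note: the diff above wires SPARC ADI (Application Data Integrity) state into the context-switch path. arch_start_context_switch() samples the outgoing task's MCDPER register into the TIF_MCDPER thread flag, finish_arch_post_lock_switch() writes it back for the incoming task, and mm_untag_mask() tells the generic code which virtual-address bits remain meaningful once the ADI version tag is masked off. Below is a minimal standalone sketch (not kernel code) of the mask arithmetic in mm_untag_mask(), assuming the 4-bit tag width used by SPARC M7; the kernel obtains the real width from adi_nbits().

/* Standalone illustration of "-1UL >> adi_nbits()": builds a mask that
 * clears the top tag bits of a virtual address. ADI_NBITS == 4 is an
 * assumption here, matching SPARC M7's version tag in VA bits 63:60.
 */
#include <stdio.h>

#define ADI_NBITS 4UL	/* assumed tag width; kernel uses adi_nbits() */

static unsigned long untag(unsigned long tagged_va)
{
	unsigned long mask = -1UL >> ADI_NBITS;	/* 0x0fffffffffffffff */

	return tagged_va & mask;
}

int main(void)
{
	unsigned long va = 0x0000500000001000UL;	/* plain address */
	unsigned long tagged = va | (0xaUL << 60);	/* version tag 0xa */

	printf("tagged:   %#018lx\n", tagged);
	printf("untagged: %#018lx\n", untag(tagged));
	return 0;
}

With a 4-bit tag, -1UL >> 4 (a logical shift, since the operand is unsigned) yields 0x0fffffffffffffff, so the tag nibble in bits 63:60 is cleared while the rest of the address survives intact.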
