Diffstat (limited to 'arch/powerpc/include/asm/mmu_context.h')
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h  77
1 file changed, 75 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 492d8140a395..051b3d63afe3 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -78,6 +78,52 @@ extern void switch_cop(struct mm_struct *next);
 extern int use_cop(unsigned long acop, struct mm_struct *mm);
 extern void drop_cop(unsigned long acop, struct mm_struct *mm);
 
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void inc_mm_active_cpus(struct mm_struct *mm)
+{
+	atomic_inc(&mm->context.active_cpus);
+}
+
+static inline void dec_mm_active_cpus(struct mm_struct *mm)
+{
+	atomic_dec(&mm->context.active_cpus);
+}
+
+static inline void mm_context_add_copro(struct mm_struct *mm)
+{
+	/*
+	 * On hash, should only be called once over the lifetime of
+	 * the context, as we can't decrement the active cpus count
+	 * and flush properly for the time being.
+	 */
+	inc_mm_active_cpus(mm);
+}
+
+static inline void mm_context_remove_copro(struct mm_struct *mm)
+{
+	/*
+	 * Need to broadcast a global flush of the full mm before
+	 * decrementing active_cpus count, as the next TLBI may be
+	 * local and the nMMU and/or PSL need to be cleaned up.
+	 * Should be rare enough so that it's acceptable.
+	 *
+	 * Skip on hash, as we don't know how to do the proper flush
+	 * for the time being. Invalidations will remain global if
+	 * used on hash.
+	 */
+	if (radix_enabled()) {
+		flush_all_mm(mm);
+		dec_mm_active_cpus(mm);
+	}
+}
+#else
+static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
+static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
+static inline void mm_context_add_copro(struct mm_struct *mm) { }
+static inline void mm_context_remove_copro(struct mm_struct *mm) { }
+#endif
+
+
 extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			       struct task_struct *tsk);
 
@@ -114,14 +160,19 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+				struct mm_struct *mm)
 {
+	return 0;
 }
 
+#ifndef CONFIG_PPC_BOOK3S_64
 static inline void arch_exit_mmap(struct mm_struct *mm)
 {
 }
+#else
+extern void arch_exit_mmap(struct mm_struct *mm);
+#endif
 
 static inline void arch_unmap(struct mm_struct *mm,
 			      struct vm_area_struct *vma,
@@ -136,11 +187,33 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 {
 }
 
+#ifdef CONFIG_PPC_MEM_KEYS
+bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
+			       bool execute, bool foreign);
+#else /* CONFIG_PPC_MEM_KEYS */
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
 {
 	/* by default, allow everything */
 	return true;
 }
+
+#define pkey_mm_init(mm)
+#define thread_pkey_regs_save(thread)
+#define thread_pkey_regs_restore(new_thread, old_thread)
+#define thread_pkey_regs_init(thread)
+
+static inline int vma_pkey(struct vm_area_struct *vma)
+{
+	return 0;
+}
+
+static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
+{
+	return 0x0UL;
+}
+
+#endif /* CONFIG_PPC_MEM_KEYS */
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
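For context, a minimal sketch of how a coprocessor driver (such as cxl, which drives the nMMU/PSL mentioned in the comments) might bracket its use of a context with the new helpers. This is not part of the diff above: struct copro_ctx, copro_attach_mm() and copro_detach_mm() are hypothetical names; only mm_context_add_copro(), mm_context_remove_copro(), mmgrab() and mmdrop() are real kernel interfaces.

/* Hypothetical driver-side usage sketch -- not part of this diff. */
#include <linux/mm_types.h>
#include <linux/sched/mm.h>
#include <asm/mmu_context.h>

struct copro_ctx {		/* hypothetical per-context driver state */
	struct mm_struct *mm;
};

static int copro_attach_mm(struct copro_ctx *ctx, struct mm_struct *mm)
{
	/*
	 * Take a reference on the mm and mark it as used by a
	 * coprocessor, so subsequent TLB invalidations for this mm
	 * are broadcast and reach the nMMU/PSL.
	 */
	mmgrab(mm);
	ctx->mm = mm;
	mm_context_add_copro(mm);
	return 0;
}

static void copro_detach_mm(struct copro_ctx *ctx)
{
	/*
	 * On radix this flushes the full mm globally before dropping
	 * the active_cpus reference; then release the mm.
	 */
	mm_context_remove_copro(ctx->mm);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
}

Note that on hash, per the comment in mm_context_add_copro(), a context should only be marked once over its lifetime, since mm_context_remove_copro() only flushes and decrements the count when radix_enabled() is true.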