Diffstat (limited to 'arch/s390/include/asm/tlbflush.h')
 arch/s390/include/asm/tlbflush.h | 47 ++++++++---------------------------------------
 1 file changed, 8 insertions(+), 39 deletions(-)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 8c840f0904f3..163ccbbe8c47 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -2,11 +2,11 @@
 #ifndef _S390_TLBFLUSH_H
 #define _S390_TLBFLUSH_H
 
+#include <linux/cpufeature.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <asm/processor.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
+#include <asm/machine.h>
 
 /*
  * Flush all TLB entries on the local CPU.
@@ -24,25 +24,20 @@ static inline void __tlb_flush_idte(unsigned long asce)
 	unsigned long opt;
 
 	opt = IDTE_PTOA;
-	if (MACHINE_HAS_TLB_GUEST)
+	if (machine_has_tlb_guest())
 		opt |= IDTE_GUEST_ASCE;
 	/* Global TLB flush for the mm */
-	asm volatile(
-		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
-		: : "a" (opt), "a" (asce) : "cc");
+	asm volatile("idte	0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
 }
 
-#ifdef CONFIG_SMP
-void smp_ptlb_all(void);
-
 /*
  * Flush all TLB entries on all CPUs.
  */
 static inline void __tlb_flush_global(void)
 {
-	unsigned int dummy = 0;
+	unsigned long dummy = 0;
 
-	csp(&dummy, 0, 0);
+	cspg(&dummy, 0, 0);
 }
 
 /*
@@ -53,18 +48,13 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
 	unsigned long gmap_asce;
 
-	/*
-	 * If the machine has IDTE we prefer to do a per mm flush
-	 * on all cpus instead of doing a local flush if the mm
-	 * only ran on the local cpu.
-	 */
 	preempt_disable();
 	atomic_inc(&mm->context.flush_count);
 	/* Reset TLB flush mask */
 	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
 	barrier();
 	gmap_asce = READ_ONCE(mm->context.gmap_asce);
-	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
+	if (gmap_asce != -1UL) {
 		if (gmap_asce)
 			__tlb_flush_idte(gmap_asce);
 		__tlb_flush_idte(mm->context.asce);
@@ -78,27 +68,8 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
 
 static inline void __tlb_flush_kernel(void)
 {
-	if (MACHINE_HAS_IDTE)
-		__tlb_flush_idte(init_mm.context.asce);
-	else
-		__tlb_flush_global();
-}
-#else
-#define __tlb_flush_global()	__tlb_flush_local()
-
-/*
- * Flush TLB entries for a specific ASCE on all CPUs.
- */
-static inline void __tlb_flush_mm(struct mm_struct *mm)
-{
-	__tlb_flush_local();
-}
-
-static inline void __tlb_flush_kernel(void)
-{
-	__tlb_flush_local();
+	__tlb_flush_idte(init_mm.context.asce);
 }
-#endif
 
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
@@ -112,7 +83,6 @@ static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 
 /*
  * TLB flushing:
- *  flush_tlb() - flushes the current mm struct TLBs
  *  flush_tlb_all() - flushes all processes TLBs
  *  flush_tlb_mm(mm) - flushes the specified mm context TLB's
  *  flush_tlb_page(vma, vmaddr) - flushes one page
@@ -128,7 +98,6 @@ static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
  * only one user. At the end of the update the flush_tlb_mm and
  * flush_tlb_range functions need to do the flush.
  */
-#define flush_tlb()			do { } while (0)
 #define flush_tlb_all()			do { } while (0)
 #define flush_tlb_page(vma, addr)	do { } while (0)
 
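For context, the decision the patched __tlb_flush_mm() is left with (IDTE flush per ASCE versus a global flush, keyed off the gmap_asce value) can be sketched outside the kernel as below. This is only an illustration of the control flow visible in the hunk above; flush_idte(), flush_global() and the sample ASCE values are hypothetical stand-ins, not kernel code.

/*
 * Standalone sketch of the mm-flush decision after this patch.
 * Build with: cc -Wall -o flush_sketch flush_sketch.c
 */
#include <stdio.h>

#define GMAP_ASCE_NONE    0UL    /* no guest ASCE attached to the mm */
#define GMAP_ASCE_INVALID (~0UL) /* the -1UL sentinel checked in __tlb_flush_mm() */

/* Stand-in for __tlb_flush_idte(): flush all entries for one ASCE. */
static void flush_idte(unsigned long asce)
{
	printf("IDTE flush for asce %#lx\n", asce);
}

/* Stand-in for __tlb_flush_global(): flush everything (CSPG path). */
static void flush_global(void)
{
	printf("global flush\n");
}

/* Mirrors the if/else in the patched __tlb_flush_mm(). */
static void flush_mm(unsigned long gmap_asce, unsigned long asce)
{
	if (gmap_asce != GMAP_ASCE_INVALID) {
		/* Flush the guest ASCE first, if one is attached, then the mm's ASCE. */
		if (gmap_asce != GMAP_ASCE_NONE)
			flush_idte(gmap_asce);
		flush_idte(asce);
	} else {
		/* Per-ASCE flush not usable for this mm: flush everything. */
		flush_global();
	}
}

int main(void)
{
	flush_mm(GMAP_ASCE_NONE, 0x1000);    /* plain mm: one IDTE flush      */
	flush_mm(0x2000, 0x1000);            /* mm with guest: two IDTE flushes */
	flush_mm(GMAP_ASCE_INVALID, 0x1000); /* sentinel set: global flush    */
	return 0;
}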
