author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2016-04-29 23:26:05 +1000
committer Michael Ellerman <mpe@ellerman.id.au>  2016-05-01 18:33:09 +1000
commit    1a472c9dba6b9646fd36717968f6a531b4441c7d
tree      3cab56eaa3a25ff717b38f4a712d430b48a78fb3 /arch/powerpc/include/asm/book3s/64/tlbflush.h
parent    676012a66f651a98808459bc8ab75661828ed96f
powerpc/mm/radix: Add tlbflush routines
The core kernel doesn't track the page size of the VA range we are invalidating, so for now we end up flushing the TLB for the entire mm here. Later patches will improve on this.

We also don't flush the page walk cache separately; instead we use RIC=2 when flushing the TLB, because we do an MMU gather flush after freeing page tables anyway.

MMU_NO_CONTEXT is updated for hash.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
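For reference, the range-flush fallback described above can be pictured as below. This is an illustrative sketch only, not the tlb-radix.c body added by this series; it assumes radix__flush_tlb_mm() invalidates all of the mm's translations using a RIC=2 flush:

	/*
	 * Sketch: the generic MM code doesn't pass a page size for
	 * [start, end), so the safe option is to invalidate every
	 * translation owned by the mm's PID. A tlbie with RIC=2
	 * ("invalidate all") also drops the page walk cache, so no
	 * separate PWC flush is needed afterwards.
	 */
	void radix__flush_tlb_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
	{
		/* No page-size information: flush the whole address space. */
		radix__flush_tlb_mm(vma->vm_mm);
	}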
Diffstat (limited to 'arch/powerpc/include/asm/book3s/64/tlbflush.h')
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush.h | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 476ea24b0313..d98424ae356c 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -1,51 +1,71 @@
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
+#define MMU_NO_CONTEXT ~0UL
+
+
#include <asm/book3s/64/tlbflush-hash.h>
+#include <asm/book3s/64/tlbflush-radix.h>
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
+ if (radix_enabled())
+ return radix__flush_tlb_range(vma, start, end);
return hash__flush_tlb_range(vma, start, end);
}
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
+ if (radix_enabled())
+ return radix__flush_tlb_kernel_range(start, end);
return hash__flush_tlb_kernel_range(start, end);
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
+ if (radix_enabled())
+ return radix__local_flush_tlb_mm(mm);
return hash__local_flush_tlb_mm(mm);
}
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
+ if (radix_enabled())
+ return radix__local_flush_tlb_page(vma, vmaddr);
return hash__local_flush_tlb_page(vma, vmaddr);
}
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
unsigned long vmaddr)
{
+ if (radix_enabled())
+ return radix__flush_tlb_page(vma, vmaddr);
return hash__flush_tlb_page_nohash(vma, vmaddr);
}
static inline void tlb_flush(struct mmu_gather *tlb)
{
+ if (radix_enabled())
+ return radix__tlb_flush(tlb);
return hash__tlb_flush(tlb);
}
#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
+ if (radix_enabled())
+ return radix__flush_tlb_mm(mm);
return hash__flush_tlb_mm(mm);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
+ if (radix_enabled())
+ return radix__flush_tlb_page(vma, vmaddr);
return hash__flush_tlb_page(vma, vmaddr);
}
#else