path: root/arch/powerpc/kernel/misc_32.S
author		Kumar Gala <galak@kernel.crashing.org>	2008-07-15 16:12:25 -0500
committer	Kumar Gala <galak@kernel.crashing.org>	2008-09-24 16:29:40 -0500
commit		0ba3418b8b1c85ee1771c63f1dd12041614e56ff (patch)
tree		6f77bf668e76725710aae5126054eebd5913d319	/arch/powerpc/kernel/misc_32.S
parent		1afb7f809bfb8fad9eec9419f3dfd75cee746ebd (diff)
powerpc: Introduce local (non-broadcast) forms of tlb invalidates
Introduced a new set of low level tlb invalidate functions that do not
broadcast invalidates on the bus:

  _tlbil_all - invalidate all
  _tlbil_pid - invalidate based on process id (or mm context)
  _tlbil_va  - invalidate based on virtual address (ea + pid)

On non-SMP configs _tlbil_all should be functionally equivalent to _tlbia
and _tlbil_va should be functionally equivalent to _tlbie.

The intent of this change is to handle SMP based invalidates via IPIs
instead of broadcasts, as that mechanism scales better to a larger number
of cores.

On e500 (fsl-booke mmu) based cores, move to using MMUCSR for invalidate
alls and tlbsx/tlbwe for invalidate by virtual address.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
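For orientation, the sketch below shows how the new entry points might be declared and driven from C; it is not part of this patch. The extern prototypes mirror the register usage in the assembly, but struct tlbil_pid_arg, do_tlbil_pid_ipi() and example_flush_context() are hypothetical names, and the three-argument on_each_cpu() is assumed (older kernels take an extra retry argument).

/* Sketch only: hypothetical C-side declarations and an IPI-driven caller.
 * Each _tlbil_* routine touches only the local CPU's TLB; cross-CPU
 * shootdown is done with an IPI instead of a broadcast on the bus.
 */
#include <linux/smp.h>

extern void _tlbil_all(void);             /* invalidate the whole local TLB */
extern void _tlbil_pid(unsigned int pid); /* invalidate by PID / mm context */
extern void _tlbil_va(unsigned long ea, unsigned int pid); /* by EA + PID */

struct tlbil_pid_arg {
	unsigned int pid;
};

/* Runs on every CPU that receives the IPI (and on the local CPU). */
static void do_tlbil_pid_ipi(void *info)
{
	struct tlbil_pid_arg *arg = info;

	_tlbil_pid(arg->pid);
}

/* Hypothetical example: flush one mm context on all online CPUs. */
static void example_flush_context(unsigned int pid)
{
	struct tlbil_pid_arg arg = { .pid = pid };

	/* on_each_cpu() runs the function locally and IPIs the other CPUs. */
	on_each_cpu(do_tlbil_pid_ipi, &arg, 1);
}

The point of the IPI approach is that each core invalidates only its own TLB, so the interconnect is never tied up with broadcast invalidate traffic.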
Diffstat (limited to 'arch/powerpc/kernel/misc_32.S')
-rw-r--r--	arch/powerpc/kernel/misc_32.S	54
1 files changed, 54 insertions, 0 deletions
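Before the diff itself, a rough C rendering of the MMUCSR0 flash-invalidate handshake that the new _tlbil_all/_tlbil_pid paths below rely on. mmucsr0_flash_invalidate() is a hypothetical name; mtspr()/mfspr()/cpu_relax() stand in for the usual kernel helpers, and the MMUCSR0_* constants are the ones used in the patch.

/* Sketch of the MMUCSR0 flash-invalidate handshake used by _tlbil_all
 * and _tlbil_pid below (not code from this patch).
 */
#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)

static void mmucsr0_flash_invalidate(void)
{
	/* Request flash invalidation of all local TLB arrays. */
	mtspr(SPRN_MMUCSR0, MMUCSR0_TLBFI);

	/* Hardware clears the TLBnFI bits once the invalidate completes. */
	while (mfspr(SPRN_MMUCSR0) & MMUCSR0_TLBFI)
		cpu_relax();
}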
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7a6dfbca7682..e9c8ab6eabfe 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -274,6 +274,10 @@ _GLOBAL(real_writeb)
/*
* Flush MMU TLB
*/
+#ifndef CONFIG_FSL_BOOKE
+_GLOBAL(_tlbil_all)
+_GLOBAL(_tlbil_pid)
+#endif
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
sync /* Flush to memory before changing mapping */
@@ -344,6 +348,9 @@ _GLOBAL(_tlbia)
/*
* Flush MMU TLB for a particular address
*/
+#ifndef CONFIG_FSL_BOOKE
+_GLOBAL(_tlbil_va)
+#endif
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
/* We run the search with interrupts disabled because we have to change
@@ -436,6 +443,53 @@ _GLOBAL(_tlbie)
#endif /* ! CONFIG_40x */
blr
+#if defined(CONFIG_FSL_BOOKE)
+/*
+ * Flush MMU TLB, but only on the local processor (no broadcast)
+ */
+_GLOBAL(_tlbil_all)
+#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+ li r3,(MMUCSR0_TLBFI)@l
+ mtspr SPRN_MMUCSR0, r3
+1:
+ mfspr r3,SPRN_MMUCSR0
+ andi. r3,r3,MMUCSR0_TLBFI@l
+ bne 1b
+ blr
+
+/*
+ * Flush MMU TLB for a particular process id, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_pid)
+/* we currently do an invalidate all since we don't have per pid invalidate */
+ li r3,(MMUCSR0_TLBFI)@l
+ mtspr SPRN_MMUCSR0, r3
+1:
+ mfspr r3,SPRN_MMUCSR0
+ andi. r3,r3,MMUCSR0_TLBFI@l
+ bne 1b
+ blr
+
+/*
+ * Flush MMU TLB for a particular address, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_va)
+ slwi r4,r4,16
+ mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
+ tlbsx 0,r3
+ mfspr r4,SPRN_MAS1 /* check valid */
+ andis. r3,r4,MAS1_VALID@h
+ beqlr
+ rlwinm r4,r4,0,1,31
+ mtspr SPRN_MAS1,r4
+ tlbwe
+ blr
+#endif /* CONFIG_FSL_BOOKE */
+
+
/*
* Flush instruction cache.
* This is a no-op on the 601.