author		Nicholas Piggin <npiggin@gmail.com>	2017-07-06 20:51:28 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2017-07-11 12:53:53 +1000
commit		41d0c2ecde19cfe93071ed7b979a53ba60b12840 (patch)
tree		53125a7e90771bad98aa47d756a93e4e9fbf91cc /arch/powerpc/kernel/mce_power.c
parent		3a6a04706fd08eb5677fdfc086e26fcd5eb154f4 (diff)
powerpc/powernv: Fix local TLB flush for boot and MCE on POWER9
There are two cases outside the normal address space management
where a CPU's local TLB is to be flushed:

  1. Host boot; in case something has left stale entries in the TLB
     (e.g., kexec).

  2. Machine check; to clean corrupted TLB entries.

CPU state restore from deep idle states also flushes the TLB. However
this seems to be a side effect of reusing the boot code to set CPU
state, rather than a requirement itself.

The current flushing has a number of problems with ISA v3.0B:

- The current radix mode of the MMU is not taken into account. tlbiel
  is undefined if the R field does not match the current radix mode.

- ISA v3.0B hash must flush the partition and process table caches.

- ISA v3.0B radix must flush partition and process scoped translations,
  partition and process table caches, and also the page walk cache.

Add POWER9 cases to handle these, with radix vs hash determined by the
host MMU mode.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
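As a reading aid for the hunk below: PPC_TLBIEL(rb, rs, ric, prs, r)
expands to the ISA v3.0B instruction tlbiel RB, RS, RIC, PRS, R. The
following annotated restatement of one invalidate from the patch is a
sketch only; the field meanings in the comments come from ISA v3.0B,
and the identifiers (rb, r) are the patch's own locals.

	/* One partition-scoped invalidate, as emitted by the patch below:
	 *   RB  = invalidation set selector (which TLB set to flush)
	 *   RS  = 0 here (carries LPID/PID information for other forms)
	 *   RIC = 2: invalidate TLB entries, the page walk cache, and the
	 *         partition/process table caches ("invalidate all")
	 *   PRS = 0: partition scope (1 selects process scope)
	 *   R   = 1 radix, 0 hash; tlbiel is undefined if R does not
	 *         match the current MMU mode
	 */
	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
			"r"(rb), "r"(0), "i"(2), "i"(0), "r"(r));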
Diffstat (limited to 'arch/powerpc/kernel/mce_power.c')
-rw-r--r--	arch/powerpc/kernel/mce_power.c	56
1 file changed, 55 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index d24e689e893f..b76ca198e09c 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -53,6 +53,60 @@ static void flush_tlb_206(unsigned int num_sets, unsigned int action)
asm volatile("ptesync" : : : "memory");
}
+static void flush_tlb_300(unsigned int num_sets, unsigned int action)
+{
+	unsigned long rb;
+	unsigned int i;
+	unsigned int r;
+
+	switch (action) {
+	case TLB_INVAL_SCOPE_GLOBAL:
+		rb = TLBIEL_INVAL_SET;
+		break;
+	case TLB_INVAL_SCOPE_LPID:
+		rb = TLBIEL_INVAL_SET_LPID;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	asm volatile("ptesync" : : : "memory");
+
+	if (early_radix_enabled())
+		r = 1;
+	else
+		r = 0;
+
+	/*
+	 * First flush table/PWC caches with set 0, then flush the
+	 * rest of the sets, partition scope. Radix must then do it
+	 * all again with process scope. Hash just has to flush
+	 * process table.
+	 */
+	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
+			"r"(rb), "r"(0), "i"(2), "i"(0), "r"(r));
+	for (i = 1; i < num_sets; i++) {
+		unsigned long set = i * (1<<TLBIEL_INVAL_SET_SHIFT);
+
+		asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
+				"r"(rb+set), "r"(0), "i"(2), "i"(0), "r"(r));
+	}
+
+	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
+			"r"(rb), "r"(0), "i"(2), "i"(1), "r"(r));
+	if (early_radix_enabled()) {
+		for (i = 1; i < num_sets; i++) {
+			unsigned long set = i * (1<<TLBIEL_INVAL_SET_SHIFT);
+
+			asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
+					"r"(rb+set), "r"(0), "i"(2), "i"(1), "r"(r));
+		}
+	}
+
+	asm volatile("ptesync" : : : "memory");
+}
+
 /*
  * Generic routines to flush TLB on POWER processors. These routines
  * are used as flush_tlb hook in the cpu_spec.
@@ -79,7 +133,7 @@ void __flush_tlb_power9(unsigned int action)
 	else
 		num_sets = POWER9_TLB_SETS_HASH;
 
-	flush_tlb_206(num_sets, action);
+	flush_tlb_300(num_sets, action);
 }
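For context on how these routines are reached (not part of this
commit): as the comment above notes, __flush_tlb_power9 is installed
as the flush_tlb hook in the cpu_spec. A minimal caller sketch follows,
assuming the cur_cpu_spec->flush_tlb wiring of kernels from this era;
the helper name is hypothetical, and the real call sites are in the
machine check and early boot paths.

	/* Hypothetical caller sketch; assumes the cpu_spec flush_tlb hook
	 * and the TLB_INVAL_SCOPE_* values declared in asm/cputable.h in
	 * kernels of this era. */
	#include <asm/cputable.h>

	static void example_flush_local_tlb(void)
	{
		if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
			cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
	}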