author	Linus Torvalds <torvalds@linux-foundation.org>	2018-03-28 13:54:03 -1000
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-03-28 13:54:03 -1000
commit	a2601d78b77aacc5dd790f488188f9556f4a9eb2 (patch)
tree	26c9e699a0570edd4018987ee24bfce439a730eb /arch/powerpc/mm
parent	616d8cf0fa0bc6528d827b1ef94f657039f3fa5e (diff)
parent	52396500f97c53860164debc7d4f759077853423 (diff)
Merge tag 'powerpc-4.16-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:
 "Some more powerpc fixes for 4.16. Apologies if this is a bit big at
  rc7, but they're all reasonably important fixes. None are actually
  for new code, so they aren't indicative of 4.16 being in bad shape
  from our point of view.

   - Fix missing AT_BASE_PLATFORM (in auxv) when we're using a new
     firmware interface for describing CPU features.

   - Fix lost pending interrupts due to a race in our interrupt
     soft-masking code.

   - A workaround for a nest MMU bug with TLB invalidations on Power9.

   - A workaround for broadcast TLB invalidations on Power9.

   - Fix a bug in our instruction SLB miss handler, when handling bad
     addresses (eg. >= TASK_SIZE), which could corrupt non-volatile
     user GPRs.

  Thanks to: Aneesh Kumar K.V, Balbir Singh, Benjamin Herrenschmidt,
  Nicholas Piggin"

* tag 'powerpc-4.16-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/64s: Fix i-side SLB miss bad address handler saving nonvolatile GPRs
  powerpc/mm: Fixup tlbie vs store ordering issue on POWER9
  powerpc/mm/radix: Move the functions that does the actual tlbie closer
  powerpc/mm/radix: Remove unused code
  powerpc/mm: Workaround Nest MMU bug with TLB invalidations
  powerpc/mm: Add tracking of the number of coprocessors using a context
  powerpc/64s: Fix lost pending interrupt due to race causing lost update to irq_happened
  powerpc/64s: Fix NULL AT_BASE_PLATFORM when using DT CPU features
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/hash_native_64.c	16
-rw-r--r--	arch/powerpc/mm/mmu_context_book3s64.c	1
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	1
-rw-r--r--	arch/powerpc/mm/tlb-radix.c	169
4 files changed, 107 insertions, 80 deletions
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index a0675e91ad7d..656933c85925 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -201,6 +201,15 @@ static inline unsigned long ___tlbie(unsigned long vpn, int psize,
return va;
}
+static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+{
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+ /* Need the extra ptesync to ensure we don't reorder tlbie */
+ asm volatile("ptesync": : :"memory");
+ ___tlbie(vpn, psize, apsize, ssize);
+ }
+}
+
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
unsigned long rb;
@@ -278,6 +287,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
asm volatile("ptesync": : :"memory");
} else {
__tlbie(vpn, psize, apsize, ssize);
+ fixup_tlbie(vpn, psize, apsize, ssize);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
@@ -771,7 +781,7 @@ static void native_hpte_clear(void)
*/
static void native_flush_hash_range(unsigned long number, int local)
{
- unsigned long vpn;
+ unsigned long vpn = 0;
unsigned long hash, index, hidx, shift, slot;
struct hash_pte *hptep;
unsigned long hpte_v;
@@ -843,6 +853,10 @@ static void native_flush_hash_range(unsigned long number, int local)
__tlbie(vpn, psize, psize, ssize);
} pte_iterate_hashed_end();
}
+ /*
+ * Just do one more with the last used values.
+ */
+ fixup_tlbie(vpn, psize, psize, ssize);
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (lock_tlbie)
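For readers skimming the hash MMU hunks above: the workaround pattern is to repeat the last tlbie after an extra ptesync on affected CPUs, before the usual closing barrier sequence. Condensed from the patched tlbie() flow (a paraphrase of the diff above, not additional kernel code):

	asm volatile("ptesync" : : : "memory");		/* order prior PTE updates */
	__tlbie(vpn, psize, apsize, ssize);		/* the real invalidation */
	fixup_tlbie(vpn, psize, apsize, ssize);		/* P9 erratum: ptesync + repeat tlbie */
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");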
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 929d9ef7083f..3f980baade4c 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -173,6 +173,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm_iommu_init(mm);
#endif
atomic_set(&mm->context.active_cpus, 0);
+ atomic_set(&mm->context.copros, 0);
return 0;
}
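The new context.copros counter parallels active_cpus and is meant to be driven by the coprocessor attach/detach paths, so the flush code can tell when a nest MMU may hold translations for this context. A minimal sketch of the helpers this implies (hedged: the helper names and placement are assumptions, not shown in this diff):

	/* Assumed helpers, e.g. in arch/powerpc/include/asm/mmu_context.h */
	static inline void mm_context_add_copro(struct mm_struct *mm)
	{
		atomic_inc(&mm->context.copros);
	}

	static inline void mm_context_remove_copro(struct mm_struct *mm)
	{
		/* Flush before dropping the count, so the nest MMU holds no
		 * stale translations once escalation stops applying. */
		flush_tlb_mm(mm);
		atomic_dec(&mm->context.copros);
	}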
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 28c980eb4422..adf469f312f2 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -481,6 +481,7 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
"r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
}
+ /* do we need fixup here? */
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
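The added comment is an open question rather than a change: whether the partition-table update path needs the same POWER9 fixup. If it did, the shape would presumably mirror fixup_tlbie() from hash_native_64.c above (hypothetical sketch only, not part of this patch):

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
		asm volatile("ptesync" : : : "memory");
		/* re-issue the last tlbie with the same operands */
	}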
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 71d1b19ad1c0..a07f5372a4bf 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -119,6 +119,49 @@ static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
+static inline void __tlbiel_va(unsigned long va, unsigned long pid,
+ unsigned long ap, unsigned long ric)
+{
+ unsigned long rb, rs, prs, r;
+
+ rb = va & ~(PPC_BITMASK(52, 63));
+ rb |= ap << PPC_BITLSHIFT(58);
+ rs = pid << PPC_BITLSHIFT(31);
+ prs = 1; /* process scoped */
+ r = 1; /* radix format */
+
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 1, rb, rs, ric, prs, r);
+}
+
+static inline void __tlbie_va(unsigned long va, unsigned long pid,
+ unsigned long ap, unsigned long ric)
+{
+ unsigned long rb, rs, prs, r;
+
+ rb = va & ~(PPC_BITMASK(52, 63));
+ rb |= ap << PPC_BITLSHIFT(58);
+ rs = pid << PPC_BITLSHIFT(31);
+ prs = 1; /* process scoped */
+ r = 1; /* radix format */
+
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
+
+static inline void fixup_tlbie(void)
+{
+ unsigned long pid = 0;
+ unsigned long va = ((1UL << 52) - 1);
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+ asm volatile("ptesync": : :"memory");
+ __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
+ }
+}
+
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
@@ -151,24 +194,25 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
asm volatile("ptesync": : :"memory");
- __tlbie_pid(pid, ric);
- asm volatile("eieio; tlbsync; ptesync": : :"memory");
-}
-static inline void __tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long ap, unsigned long ric)
-{
- unsigned long rb,rs,prs,r;
-
- rb = va & ~(PPC_BITMASK(52, 63));
- rb |= ap << PPC_BITLSHIFT(58);
- rs = pid << PPC_BITLSHIFT(31);
- prs = 1; /* process scoped */
- r = 1; /* raidx format */
-
- asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
- trace_tlbie(0, 1, rb, rs, ric, prs, r);
+ /*
+ * Work around the fact that the "ric" argument to __tlbie_pid
+ * must be a compile-time constant to match the "i" constraint
+ * in the asm statement.
+ */
+ switch (ric) {
+ case RIC_FLUSH_TLB:
+ __tlbie_pid(pid, RIC_FLUSH_TLB);
+ break;
+ case RIC_FLUSH_PWC:
+ __tlbie_pid(pid, RIC_FLUSH_PWC);
+ break;
+ case RIC_FLUSH_ALL:
+ default:
+ __tlbie_pid(pid, RIC_FLUSH_ALL);
+ }
+ fixup_tlbie();
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
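The switch added to _tlbie_pid() looks redundant but is load-bearing: the "i" constraint in the asm inside __tlbie_pid() requires an immediate known at compile time, so each arm passes a literal that survives inlining. A standalone illustration of the idiom (generic sketch, not kernel code):

	/* The empty asm template emits no instruction, but the "i"
	 * constraint still demands a compile-time constant after
	 * inlining; a plain run-time variable would fail to build. */
	static inline void need_const(unsigned long imm)
	{
		asm volatile("" : : "i"(imm));
	}

	static void op(unsigned long ric)	/* value only known at run time */
	{
		switch (ric) {
		case 0:
			need_const(0);
			break;
		case 1:
			need_const(1);
			break;
		default:
			need_const(2);
		}
	}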
@@ -203,22 +247,6 @@ static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
asm volatile("ptesync": : :"memory");
}
-static inline void __tlbie_va(unsigned long va, unsigned long pid,
- unsigned long ap, unsigned long ric)
-{
- unsigned long rb,rs,prs,r;
-
- rb = va & ~(PPC_BITMASK(52, 63));
- rb |= ap << PPC_BITLSHIFT(58);
- rs = pid << PPC_BITLSHIFT(31);
- prs = 1; /* process scoped */
- r = 1; /* raidx format */
-
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
- trace_tlbie(0, 0, rb, rs, ric, prs, r);
-}
-
static inline void __tlbie_va_range(unsigned long start, unsigned long end,
unsigned long pid, unsigned long page_size,
unsigned long psize)
@@ -237,6 +265,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
asm volatile("ptesync": : :"memory");
__tlbie_va(va, pid, ap, ric);
+ fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
@@ -248,6 +277,7 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
if (also_pwc)
__tlbie_pid(pid, RIC_FLUSH_PWC);
__tlbie_va_range(start, end, pid, page_size, psize);
+ fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
@@ -311,6 +341,16 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);
+static bool mm_needs_flush_escalation(struct mm_struct *mm)
+{
+ /*
+ * P9 nest MMU has issues with the page walk cache
+ * caching PTEs and not flushing them properly when
+ * RIC = 0 for a PID/LPID invalidate
+ */
+ return atomic_read(&mm->context.copros) != 0;
+}
+
#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
@@ -321,9 +361,12 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
return;
preempt_disable();
- if (!mm_is_thread_local(mm))
- _tlbie_pid(pid, RIC_FLUSH_TLB);
- else
+ if (!mm_is_thread_local(mm)) {
+ if (mm_needs_flush_escalation(mm))
+ _tlbie_pid(pid, RIC_FLUSH_ALL);
+ else
+ _tlbie_pid(pid, RIC_FLUSH_TLB);
+ } else
_tlbiel_pid(pid, RIC_FLUSH_TLB);
preempt_enable();
}
@@ -435,10 +478,14 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
if (full) {
- if (local)
+ if (local) {
_tlbiel_pid(pid, RIC_FLUSH_TLB);
- else
- _tlbie_pid(pid, RIC_FLUSH_TLB);
+ } else {
+ if (mm_needs_flush_escalation(mm))
+ _tlbie_pid(pid, RIC_FLUSH_ALL);
+ else
+ _tlbie_pid(pid, RIC_FLUSH_TLB);
+ }
} else {
bool hflush = false;
unsigned long hstart, hend;
@@ -465,6 +512,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
if (hflush)
__tlbie_va_range(hstart, hend, pid,
HPAGE_PMD_SIZE, MMU_PAGE_2M);
+ fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
}
@@ -548,6 +596,9 @@ static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
}
if (full) {
+ if (!local && mm_needs_flush_escalation(mm))
+ also_pwc = true;
+
if (local)
_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
else
@@ -603,46 +654,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
- unsigned long page_size)
-{
- unsigned long rb,rs,prs,r;
- unsigned long ap;
- unsigned long ric = RIC_FLUSH_TLB;
-
- ap = mmu_get_ap(radix_get_mmu_psize(page_size));
- rb = gpa & ~(PPC_BITMASK(52, 63));
- rb |= ap << PPC_BITLSHIFT(58);
- rs = lpid & ((1UL << 32) - 1);
- prs = 0; /* process scoped */
- r = 1; /* raidx format */
-
- asm volatile("ptesync": : :"memory");
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
- asm volatile("eieio; tlbsync; ptesync": : :"memory");
- trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
-}
-EXPORT_SYMBOL(radix__flush_tlb_lpid_va);
-
-void radix__flush_tlb_lpid(unsigned long lpid)
-{
- unsigned long rb,rs,prs,r;
- unsigned long ric = RIC_FLUSH_ALL;
-
- rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
- rs = lpid & ((1UL << 32) - 1);
- prs = 0; /* partition scoped */
- r = 1; /* raidx format */
-
- asm volatile("ptesync": : :"memory");
- asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
- : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
- asm volatile("eieio; tlbsync; ptesync": : :"memory");
- trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
-}
-EXPORT_SYMBOL(radix__flush_tlb_lpid);
-
void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{