author     Michael Ellerman <mpe@ellerman.id.au>  2014-12-02 14:19:20 +1100
committer  Michael Ellerman <mpe@ellerman.id.au>  2014-12-02 14:19:20 +1100
commit     b5be75d00833a3f0ff76b1d7473119be33367faa (patch)
tree       88106f598970b086f06a0d7dd123b6d00adf9f4a /arch/powerpc/mm
parent     e39f223fc93580c86ccf6b3422033e349f57f0dd (diff)
parent     d557b09800dab5dd6804e5b79324069abcf0be11 (diff)
Merge remote-tracking branch 'benh/next' into next
Merge updates collected & acked by Ben. A few EEH patches from Gavin, some mm updates from Aneesh and a few odds and ends.
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_native_64.c    28
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c     70
-rw-r--r--  arch/powerpc/mm/hugepage-hash64.c   54
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c        7
-rw-r--r--  arch/powerpc/mm/pgtable_64.c        69
5 files changed, 110 insertions, 118 deletions
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index d53288a08c37..13700911b522 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -294,8 +294,6 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
vpn, want_v & HPTE_V_AVPN, slot, newpp);
- native_lock_hpte(hptep);
-
hpte_v = be64_to_cpu(hptep->v);
/*
* We need to invalidate the TLB always because hpte_remove doesn't do
@@ -308,16 +306,24 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
DBG_LOW(" -> miss\n");
ret = -1;
} else {
- DBG_LOW(" -> hit\n");
- /* Update the HPTE */
- hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
+ native_lock_hpte(hptep);
+ /* recheck with locks held */
+ hpte_v = be64_to_cpu(hptep->v);
+ if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
+ !(hpte_v & HPTE_V_VALID))) {
+ ret = -1;
+ } else {
+ DBG_LOW(" -> hit\n");
+ /* Update the HPTE */
+ hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
+ ~(HPTE_R_PP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PP | HPTE_R_N |
+ HPTE_R_C)));
+ }
+ native_unlock_hpte(hptep);
}
- native_unlock_hpte(hptep);
-
/* Ensure it is out of the tlb too. */
tlbie(vpn, bpsize, apsize, ssize, local);
-
return ret;
}
@@ -419,7 +425,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
static void native_hugepage_invalidate(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
- int psize, int ssize)
+ int psize, int ssize, int local)
{
int i;
struct hash_pte *hptep;
@@ -465,7 +471,7 @@ static void native_hugepage_invalidate(unsigned long vsid,
* instruction compares entry_VA in tlb with the VA specified
* here
*/
- tlbie(vpn, psize, actual_psize, ssize, 0);
+ tlbie(vpn, psize, actual_psize, ssize, local);
}
local_irq_restore(flags);
}
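
The first hunk above changes native_hpte_updatepp() from lock-then-check to check-then-lock: the HPTE is first compared against the wanted VPN without the per-entry lock, and only on a hit is the lock taken and the comparison repeated before the PP bits are rewritten, so the common miss path never touches the lock. Below is a minimal userspace sketch of that double-checked pattern; the entry layout, lock and names are invented for illustration and are not the kernel's HPTE structures.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical cache entry: a validity flag and a tag, protected by a lock. */
    struct entry {
            pthread_mutex_t lock;
            bool valid;
            uint64_t tag;
            uint64_t flags;
    };

    /* Returns -1 on miss, 0 after updating flags.  The cheap unlocked check
     * filters out misses; the state is re-checked once the lock is held, as
     * the reworked native_hpte_updatepp() does for HPTE_V_VALID/HPTE_V_AVPN. */
    static int update_entry(struct entry *e, uint64_t want_tag, uint64_t new_flags)
    {
            int ret = -1;

            if (!e->valid || e->tag != want_tag)            /* unlocked check */
                    return ret;

            pthread_mutex_lock(&e->lock);
            if (e->valid && e->tag == want_tag) {           /* recheck under lock */
                    e->flags = new_flags;
                    ret = 0;
            }
            pthread_mutex_unlock(&e->lock);
            return ret;
    }

    int main(void)
    {
            struct entry e = { .lock = PTHREAD_MUTEX_INITIALIZER,
                               .valid = true, .tag = 0x42, .flags = 0 };

            printf("hit:  %d\n", update_entry(&e, 0x42, 0x7));      /* 0 */
            printf("miss: %d\n", update_entry(&e, 0x99, 0x7));      /* -1 */
            return 0;
    }

Build with -pthread. The point of the restructure is only who pays for the lock: a miss returns immediately, and the TLB invalidation after the block still runs unconditionally, as the hunk shows.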
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f01027731e23..68211d398fdb 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1315,6 +1315,76 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
#endif
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+ pmd_t *pmdp, unsigned int psize, int ssize, int local)
+{
+ int i, max_hpte_count, valid;
+ unsigned long s_addr;
+ unsigned char *hpte_slot_array;
+ unsigned long hidx, shift, vpn, hash, slot;
+
+ s_addr = addr & HPAGE_PMD_MASK;
+ hpte_slot_array = get_hpte_slot_array(pmdp);
+ /*
+ * If we try to do a HUGE PTE update after a withdraw is done,
+ * we will find the hpte_slot_array below to be NULL. This happens
+ * when we do split_huge_page_pmd.
+ */
+ if (!hpte_slot_array)
+ return;
+
+ if (ppc_md.hugepage_invalidate) {
+ ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
+ psize, ssize, local);
+ goto tm_abort;
+ }
+ /*
+ * No bulk hpte removal support, invalidate each entry
+ */
+ shift = mmu_psize_defs[psize].shift;
+ max_hpte_count = HPAGE_PMD_SIZE >> shift;
+ for (i = 0; i < max_hpte_count; i++) {
+ /*
+ * 8 bits per hpte entry:
+ * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
+ */
+ valid = hpte_valid(hpte_slot_array, i);
+ if (!valid)
+ continue;
+ hidx = hpte_hash_index(hpte_slot_array, i);
+
+ /* get the vpn */
+ addr = s_addr + (i * (1ul << shift));
+ vpn = hpt_vpn(addr, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ if (hidx & _PTEIDX_SECONDARY)
+ hash = ~hash;
+
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot += hidx & _PTEIDX_GROUP_IX;
+ ppc_md.hpte_invalidate(slot, vpn, psize,
+ MMU_PAGE_16M, ssize, local);
+ }
+tm_abort:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* Transactions are not aborted by tlbiel, only tlbie.
+ * Without an abort, syncing a page back to a block device w/ PIO could pick up
+ * transactional data (bad!) so we force an abort here. Before the
+ * sync the page will be made read-only, which will flush_hash_page.
+ * BIG ISSUE here: if the kernel uses a page from userspace without
+ * unmapping it first, it may see the speculated version.
+ */
+ if (local && cpu_has_feature(CPU_FTR_TM) &&
+ current->thread.regs &&
+ MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ tm_enable();
+ tm_abort(TM_CAUSE_TLBI);
+ }
+#endif
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
void flush_hash_range(unsigned long number, int local)
{
if (ppc_md.flush_hash_range)
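
The new flush_hash_hugepage() walks one byte of hpte_slot_array per sub-page of the huge page; the in-code comment gives the layout (three unused bits, a secondary-group bit, a 3-bit hash index, and a valid bit). A hedged sketch of that decode is below, with bit positions assumed from the comment only; the kernel's real accessors are hpte_valid() and hpte_hash_index(), and the real masks are _PTEIDX_SECONDARY/_PTEIDX_GROUP_IX.

    #include <stdint.h>
    #include <stdio.h>

    /* One byte per sub-page, assumed layout per the comment in
     * flush_hash_hugepage(): 000 | secondary (1) | hidx (3) | valid (1).
     * These macros are illustrative stand-ins, not the kernel definitions. */
    #define SLOT_VALID(b)           ((b) & 0x1)
    #define SLOT_HIDX(b)            (((b) >> 1) & 0xf)  /* secondary bit + group index */
    #define PTEIDX_SECONDARY        0x8
    #define PTEIDX_GROUP_IX         0x7

    int main(void)
    {
            /* Example slot bytes for four sub-pages of a huge page. */
            uint8_t slots[] = { 0x00, 0x03, 0x0b, 0x1f };

            for (unsigned i = 0; i < sizeof(slots); i++) {
                    if (!SLOT_VALID(slots[i])) {
                            printf("slot %u: no HPTE\n", i);
                            continue;
                    }
                    uint8_t hidx = SLOT_HIDX(slots[i]);
                    printf("slot %u: %s hash group, index %u\n", i,
                           (hidx & PTEIDX_SECONDARY) ? "secondary" : "primary",
                           hidx & PTEIDX_GROUP_IX);
            }
            return 0;
    }

The decoded group index selects the slot within the hash group, exactly as the loop in the hunk does before calling ppc_md.hpte_invalidate().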
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 5f5e6328c21c..3a648cd363ae 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -18,57 +18,6 @@
#include <linux/mm.h>
#include <asm/machdep.h>
-static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
- pmd_t *pmdp, unsigned int psize, int ssize)
-{
- int i, max_hpte_count, valid;
- unsigned long s_addr;
- unsigned char *hpte_slot_array;
- unsigned long hidx, shift, vpn, hash, slot;
-
- s_addr = addr & HPAGE_PMD_MASK;
- hpte_slot_array = get_hpte_slot_array(pmdp);
- /*
- * IF we try to do a HUGE PTE update after a withdraw is done.
- * we will find the below NULL. This happens when we do
- * split_huge_page_pmd
- */
- if (!hpte_slot_array)
- return;
-
- if (ppc_md.hugepage_invalidate)
- return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
- psize, ssize);
- /*
- * No bluk hpte removal support, invalidate each entry
- */
- shift = mmu_psize_defs[psize].shift;
- max_hpte_count = HPAGE_PMD_SIZE >> shift;
- for (i = 0; i < max_hpte_count; i++) {
- /*
- * 8 bits per each hpte entries
- * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
- */
- valid = hpte_valid(hpte_slot_array, i);
- if (!valid)
- continue;
- hidx = hpte_hash_index(hpte_slot_array, i);
-
- /* get the vpn */
- addr = s_addr + (i * (1ul << shift));
- vpn = hpt_vpn(addr, vsid, ssize);
- hash = hpt_hash(vpn, shift, ssize);
- if (hidx & _PTEIDX_SECONDARY)
- hash = ~hash;
-
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- slot += hidx & _PTEIDX_GROUP_IX;
- ppc_md.hpte_invalidate(slot, vpn, psize,
- MMU_PAGE_16M, ssize, 0);
- }
-}
-
-
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
pmd_t *pmdp, unsigned long trap, int local, int ssize,
unsigned int psize)
@@ -145,7 +94,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
* hash page table entries.
*/
if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
- invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
+ flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
+ ssize, local);
}
valid = hpte_valid(hpte_slot_array, index);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8c9b8115867c..868ab0fc5fbf 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -355,6 +355,13 @@ static int __init do_gpage_early_setup(char *param, char *val,
if (size != 0) {
if (sscanf(val, "%lu", &npages) <= 0)
npages = 0;
+ if (npages > MAX_NUMBER_GPAGES) {
+ pr_warn("MMU: %lu pages requested for page "
+ "size %llu KB, limiting to "
+ __stringify(MAX_NUMBER_GPAGES) "\n",
+ npages, size / 1024);
+ npages = MAX_NUMBER_GPAGES;
+ }
gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
size = 0;
}
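
The hugetlbpage.c hunk simply clamps an over-large early-boot request for gigantic pages and warns, using __stringify() so the compile-time limit appears in the message string. A small standalone sketch of that pattern, with a made-up limit value:

    #include <stdio.h>

    /* Two-level expansion so macro arguments are expanded before being
     * stringified, mirroring the kernel's __stringify(); local names only. */
    #define str_1(x)        #x
    #define str(x)          str_1(x)

    #define MAX_NUMBER_GPAGES 128   /* assumed value, for illustration */

    int main(void)
    {
            unsigned long npages = 1000;    /* e.g. parsed from a boot parameter */

            if (npages > MAX_NUMBER_GPAGES) {
                    printf("%lu pages requested, limiting to " str(MAX_NUMBER_GPAGES) "\n",
                           npages);
                    npages = MAX_NUMBER_GPAGES;
            }
            printf("npages = %lu\n", npages);
            return 0;
    }

String-literal concatenation lets the stringified constant sit directly inside the format string, which is why the kernel hunk can splice __stringify(MAX_NUMBER_GPAGES) between two quoted fragments.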
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 87ff0c1908a9..eea9fa1f8ae7 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -739,29 +739,14 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, unsigned long old_pmd)
{
- int ssize, i;
- unsigned long s_addr;
- int max_hpte_count;
- unsigned int psize, valid;
- unsigned char *hpte_slot_array;
- unsigned long hidx, vpn, vsid, hash, shift, slot;
-
- /*
- * Flush all the hptes mapping this hugepage
- */
- s_addr = addr & HPAGE_PMD_MASK;
- hpte_slot_array = get_hpte_slot_array(pmdp);
- /*
- * IF we try to do a HUGE PTE update after a withdraw is done.
- * we will find the below NULL. This happens when we do
- * split_huge_page_pmd
- */
- if (!hpte_slot_array)
- return;
+ int ssize, local = 0;
+ unsigned int psize;
+ unsigned long vsid;
+ const struct cpumask *tmp;
/* get the base page size,vsid and segment size */
#ifdef CONFIG_DEBUG_VM
- psize = get_slice_psize(mm, s_addr);
+ psize = get_slice_psize(mm, addr);
BUG_ON(psize == MMU_PAGE_16M);
#endif
if (old_pmd & _PAGE_COMBO)
@@ -769,46 +754,20 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
else
psize = MMU_PAGE_64K;
- if (!is_kernel_addr(s_addr)) {
- ssize = user_segment_size(s_addr);
- vsid = get_vsid(mm->context.id, s_addr, ssize);
+ if (!is_kernel_addr(addr)) {
+ ssize = user_segment_size(addr);
+ vsid = get_vsid(mm->context.id, addr, ssize);
WARN_ON(vsid == 0);
} else {
- vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
+ vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;
}
- if (ppc_md.hugepage_invalidate)
- return ppc_md.hugepage_invalidate(vsid, s_addr,
- hpte_slot_array,
- psize, ssize);
- /*
- * No bluk hpte removal support, invalidate each entry
- */
- shift = mmu_psize_defs[psize].shift;
- max_hpte_count = HPAGE_PMD_SIZE >> shift;
- for (i = 0; i < max_hpte_count; i++) {
- /*
- * 8 bits per each hpte entries
- * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
- */
- valid = hpte_valid(hpte_slot_array, i);
- if (!valid)
- continue;
- hidx = hpte_hash_index(hpte_slot_array, i);
-
- /* get the vpn */
- addr = s_addr + (i * (1ul << shift));
- vpn = hpt_vpn(addr, vsid, ssize);
- hash = hpt_hash(vpn, shift, ssize);
- if (hidx & _PTEIDX_SECONDARY)
- hash = ~hash;
-
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- slot += hidx & _PTEIDX_GROUP_IX;
- ppc_md.hpte_invalidate(slot, vpn, psize,
- MMU_PAGE_16M, ssize, 0);
- }
+ tmp = cpumask_of(smp_processor_id());
+ if (cpumask_equal(mm_cpumask(mm), tmp))
+ local = 1;
+
+ return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, local);
}
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
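
In the rewritten hpte_do_hugepage_flush() the per-slot loop is gone: the function now only works out psize, vsid and ssize, decides whether a local flush is enough, and defers to flush_hash_hugepage(). The "local" decision compares the set of CPUs the mm has been active on with the current CPU alone. A simplified userspace model of that check follows; the cpumask is reduced to a plain 64-bit mask and the helper names are stand-ins for the kernel's cpumask API, not the real thing.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model a cpumask as a 64-bit mask (enough CPUs for the sketch). */
    typedef uint64_t cpumask_t;

    static cpumask_t mask_of_cpu(int cpu)                 { return 1ull << cpu; }
    static bool masks_equal(cpumask_t a, cpumask_t b)     { return a == b; }

    /* If the address space has only ever run on the current CPU, a local
     * invalidation (tlbiel) suffices; otherwise a broadcast tlbie is needed. */
    static int want_local_flush(cpumask_t mm_mask, int this_cpu)
    {
            return masks_equal(mm_mask, mask_of_cpu(this_cpu)) ? 1 : 0;
    }

    int main(void)
    {
            int this_cpu = 2;
            cpumask_t mm_mask = mask_of_cpu(2);   /* mm has run only on CPU 2 */

            printf("local = %d\n", want_local_flush(mm_mask, this_cpu));  /* 1 */

            mm_mask |= mask_of_cpu(5);            /* mm was also scheduled on CPU 5 */
            printf("local = %d\n", want_local_flush(mm_mask, this_cpu));  /* 0 */
            return 0;
    }

This is the same condition the hunk adds with cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())), and the resulting flag is what flush_hash_hugepage() ultimately passes down to tlbie()/hpte_invalidate().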