Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/book3s32/tlb.c          |   9
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c   |  41
-rw-r--r--  arch/powerpc/mm/book3s64/internal.h     |   9
-rw-r--r--  arch/powerpc/mm/book3s64/mmu_context.c  |   2
-rw-r--r--  arch/powerpc/mm/book3s64/pgtable.c      |  23
-rw-r--r--  arch/powerpc/mm/book3s64/slb.c          | 109
-rw-r--r--  arch/powerpc/mm/ptdump/8xx.c            |   5
-rw-r--r--  arch/powerpc/mm/ptdump/book3s64.c       |   5
-rw-r--r--  arch/powerpc/mm/ptdump/hashpagetable.c  |   6
-rw-r--r--  arch/powerpc/mm/ptdump/ptdump.c         |   1
-rw-r--r--  arch/powerpc/mm/ptdump/ptdump.h         |   1
-rw-r--r--  arch/powerpc/mm/ptdump/shared.c         |   5
12 files changed, 99 insertions, 117 deletions
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index 9ad6b56bfec9..e54a7b011232 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -105,3 +105,12 @@ void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
EXPORT_SYMBOL(hash__flush_tlb_page);
+
+void hash__flush_gather(struct mmu_gather *tlb)
+{
+ if (tlb->fullmm || tlb->need_flush_all)
+ hash__flush_tlb_mm(tlb->mm);
+ else
+ hash__flush_range(tlb->mm, tlb->start, tlb->end);
+}
+EXPORT_SYMBOL(hash__flush_gather);
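[Editor's note, illustrative only — not part of this diff: hash__flush_gather() packages the usual mmu_gather decision, flushing the whole context on full-mm teardown (or when need_flush_all is set) and only the gathered range otherwise. A minimal sketch of a caller, assuming the powerpc tlb_flush() mmu_gather hook for 32-bit hash MMUs simply forwards to the new helper (the actual hook lives outside arch/powerpc/mm and is not shown in this patch):

void tlb_flush(struct mmu_gather *tlb)
{
	/* Assumed dispatch: defer the fullmm-vs-range choice to the new helper. */
	hash__flush_gather(tlb);
}]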
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 3aee3af614af..9dc5889d6ecb 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -47,6 +47,7 @@
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
+#include <asm/pgalloc.h>
#include <asm/types.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
@@ -449,6 +450,7 @@ static __init void hash_kfence_map_pool(void)
{
unsigned long kfence_pool_start, kfence_pool_end;
unsigned long prot = pgprot_val(PAGE_KERNEL);
+ unsigned int pshift = mmu_psize_defs[mmu_linear_psize].shift;
if (!kfence_pool)
return;
@@ -459,6 +461,7 @@ static __init void hash_kfence_map_pool(void)
BUG_ON(htab_bolt_mapping(kfence_pool_start, kfence_pool_end,
kfence_pool, prot, mmu_linear_psize,
mmu_kernel_ssize));
+ update_page_count(mmu_linear_psize, KFENCE_POOL_SIZE >> pshift);
memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
}
@@ -952,7 +955,7 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
block_size = be64_to_cpu(addr_prop[1]);
if (block_size != (16 * GB))
return 0;
- printk(KERN_INFO "Huge page(16GB) memory: "
+ pr_info("Huge page(16GB) memory: "
"addr = 0x%lX size = 0x%lX pages = %d\n",
phys_addr, block_size, expected_pages);
if (phys_addr + block_size * expected_pages <= memblock_end_of_DRAM()) {
@@ -1135,7 +1138,7 @@ static void __init htab_init_page_sizes(void)
mmu_vmemmap_psize = mmu_virtual_psize;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
- printk(KERN_DEBUG "Page orders: linear mapping = %d, "
+ pr_info("Page orders: linear mapping = %d, "
"virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
", vmemmap = %d"
@@ -1234,6 +1237,7 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
int nid, pgprot_t prot)
{
int rc;
+ unsigned int pshift = mmu_psize_defs[mmu_linear_psize].shift;
if (end >= H_VMALLOC_START) {
pr_warn("Outside the supported range\n");
@@ -1251,17 +1255,22 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
mmu_kernel_ssize);
BUG_ON(rc2 && (rc2 != -ENOENT));
}
+ update_page_count(mmu_linear_psize, (end - start) >> pshift);
return rc;
}
int hash__remove_section_mapping(unsigned long start, unsigned long end)
{
+ unsigned int pshift = mmu_psize_defs[mmu_linear_psize].shift;
+
int rc = htab_remove_mapping(start, end, mmu_linear_psize,
mmu_kernel_ssize);
if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
pr_warn("Hash collision while resizing HPT\n");
+ if (!rc)
+ update_page_count(mmu_linear_psize, -((end - start) >> pshift));
return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
@@ -1302,27 +1311,34 @@ static void __init htab_initialize(void)
unsigned long table;
unsigned long pteg_count;
unsigned long prot;
- phys_addr_t base = 0, size = 0, end;
+ phys_addr_t base = 0, size = 0, end, limit = MEMBLOCK_ALLOC_ANYWHERE;
u64 i;
+ unsigned int pshift = mmu_psize_defs[mmu_linear_psize].shift;
DBG(" -> htab_initialize()\n");
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ limit = ppc64_rma_size;
+
if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
mmu_kernel_ssize = MMU_SEGSIZE_1T;
mmu_highuser_ssize = MMU_SEGSIZE_1T;
- printk(KERN_INFO "Using 1TB segments\n");
+ pr_info("Using 1TB segments\n");
}
if (stress_slb_enabled)
static_branch_enable(&stress_slb_key);
+ if (no_slb_preload)
+ static_branch_enable(&no_slb_preload_key);
+
if (stress_hpt_enabled) {
unsigned long tmp;
static_branch_enable(&stress_hpt_key);
// Too early to use nr_cpu_ids, so use NR_CPUS
tmp = memblock_phys_alloc_range(sizeof(struct stress_hpt_struct) * NR_CPUS,
__alignof__(struct stress_hpt_struct),
- 0, MEMBLOCK_ALLOC_ANYWHERE);
+ MEMBLOCK_LOW_LIMIT, limit);
memset((void *)tmp, 0xff, sizeof(struct stress_hpt_struct) * NR_CPUS);
stress_hpt_struct = __va(tmp);
@@ -1356,11 +1372,10 @@ static void __init htab_initialize(void)
mmu_hash_ops.hpte_clear_all();
#endif
} else {
- unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;
table = memblock_phys_alloc_range(htab_size_bytes,
htab_size_bytes,
- 0, limit);
+ MEMBLOCK_LOW_LIMIT, limit);
if (!table)
panic("ERROR: Failed to allocate %pa bytes below %pa\n",
&htab_size_bytes, &limit);
@@ -1392,8 +1407,8 @@ static void __init htab_initialize(void)
size = end - base;
base = (unsigned long)__va(base);
- DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
- base, size, prot);
+ pr_debug("creating mapping for region: 0x%pa..0x%pa (prot: %lx)\n",
+ &base, &size, prot);
if ((base + size) >= H_VMALLOC_START) {
pr_warn("Outside the supported range\n");
@@ -1402,6 +1417,8 @@ static void __init htab_initialize(void)
BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
prot, mmu_linear_psize, mmu_kernel_ssize));
+
+ update_page_count(mmu_linear_psize, size >> pshift);
}
hash_kfence_map_pool();
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
@@ -1423,6 +1440,8 @@ static void __init htab_initialize(void)
BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
__pa(tce_alloc_start), prot,
mmu_linear_psize, mmu_kernel_ssize));
+ update_page_count(mmu_linear_psize,
+ (tce_alloc_end - tce_alloc_start) >> pshift);
}
@@ -1867,7 +1886,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
* in vmalloc space, so switch vmalloc
* to 4k pages
*/
- printk(KERN_ALERT "Reducing vmalloc segment "
+ pr_alert("Reducing vmalloc segment "
"to 4kB pages because of "
"non-cacheable mapping\n");
psize = mmu_vmalloc_psize = MMU_PAGE_4K;
@@ -2432,6 +2451,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n")
static int __init hash64_debugfs(void)
{
+ if (radix_enabled())
+ return 0;
debugfs_create_file("hpt_order", 0600, arch_debugfs_dir, NULL,
&fops_hpt_order);
return 0;
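[Editor's note: the update_page_count() calls added above feed the per-page-size direct-mapping counters that arch_report_meminfo() reads back (see the pgtable.c hunk below). A rough sketch of what such a helper amounts to, assuming it does nothing more than adjust the shared counter array (its real definition is not part of this diff):

void update_page_count(int psize, long count)
{
	/* Assumed body: adjust the counter reported as DirectMapXX in /proc/meminfo. */
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[psize]);
}

Callers pass a negative count on unmap, as hash__remove_section_mapping() does above.]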
diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h
index a57a25f06a21..cad08d83369c 100644
--- a/arch/powerpc/mm/book3s64/internal.h
+++ b/arch/powerpc/mm/book3s64/internal.h
@@ -22,9 +22,14 @@ static inline bool stress_hpt(void)
return static_branch_unlikely(&stress_hpt_key);
}
-void hpt_do_stress(unsigned long ea, unsigned long hpte_group);
+extern bool no_slb_preload;
+DECLARE_STATIC_KEY_FALSE(no_slb_preload_key);
+static inline bool slb_preload_disabled(void)
+{
+ return static_branch_unlikely(&no_slb_preload_key);
+}
-void slb_setup_new_exec(void);
+void hpt_do_stress(unsigned long ea, unsigned long hpte_group);
void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush);
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index 4e1e45420bd4..fb9dcf9ca599 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -150,8 +150,6 @@ static int hash__init_new_context(struct mm_struct *mm)
void hash__setup_new_exec(void)
{
slice_setup_new_exec();
-
- slb_setup_new_exec();
}
#else
static inline int hash__init_new_context(struct mm_struct *mm)
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index c9431ae7f78a..e3485db7de02 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -510,20 +510,21 @@ atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
void arch_report_meminfo(struct seq_file *m)
{
- /*
- * Hash maps the memory with one size mmu_linear_psize.
- * So don't bother to print these on hash
- */
- if (!radix_enabled())
- return;
seq_printf(m, "DirectMap4k: %8lu kB\n",
atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
- seq_printf(m, "DirectMap64k: %8lu kB\n",
+ seq_printf(m, "DirectMap64k: %8lu kB\n",
atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
- seq_printf(m, "DirectMap2M: %8lu kB\n",
- atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
- seq_printf(m, "DirectMap1G: %8lu kB\n",
- atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
+ if (radix_enabled()) {
+ seq_printf(m, "DirectMap2M: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
+ seq_printf(m, "DirectMap1G: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
+ } else {
+ seq_printf(m, "DirectMap16M: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_16M]) << 14);
+ seq_printf(m, "DirectMap16G: %8lu kB\n",
+ atomic_long_read(&direct_pages_count[MMU_PAGE_16G]) << 24);
+ }
}
#endif /* CONFIG_PROC_FS */
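[Editor's note: the shift counts convert page counts to kB — << 2 for 4 kB and << 6 for 64 kB pages, << 11 for 2 MB and << 20 for 1 GB on radix, and << 14 for 16 MB (2^14 kB) and << 24 for 16 GB (2^24 kB) on hash. With the new else branch, /proc/meminfo on a hash-MMU system would gain entries along these lines (values purely illustrative, e.g. 64 GB mapped with 16 MB linear pages):

DirectMap4k:           0 kB
DirectMap64k:          0 kB
DirectMap16M:   67108864 kB
DirectMap16G:          0 kB]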
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 6b783552403c..15f73abd1506 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -42,6 +42,15 @@ early_param("stress_slb", parse_stress_slb);
__ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key);
+bool no_slb_preload __initdata;
+static int __init parse_no_slb_preload(char *p)
+{
+ no_slb_preload = true;
+ return 0;
+}
+early_param("no_slb_preload", parse_no_slb_preload);
+__ro_after_init DEFINE_STATIC_KEY_FALSE(no_slb_preload_key);
+
static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
@@ -294,11 +303,14 @@ static bool preload_hit(struct thread_info *ti, unsigned long esid)
return false;
}
-static bool preload_add(struct thread_info *ti, unsigned long ea)
+static void preload_add(struct thread_info *ti, unsigned long ea)
{
unsigned char idx;
unsigned long esid;
+ if (slb_preload_disabled())
+ return;
+
if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
/* EAs are stored >> 28 so 256MB segments don't need clearing */
if (ea & ESID_MASK_1T)
@@ -308,7 +320,7 @@ static bool preload_add(struct thread_info *ti, unsigned long ea)
esid = ea >> SID_SHIFT;
if (preload_hit(ti, esid))
- return false;
+ return;
idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
ti->slb_preload_esid[idx] = esid;
@@ -316,8 +328,6 @@ static bool preload_add(struct thread_info *ti, unsigned long ea)
ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
else
ti->slb_preload_nr++;
-
- return true;
}
static void preload_age(struct thread_info *ti)
@@ -328,94 +338,6 @@ static void preload_age(struct thread_info *ti)
ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
}
-void slb_setup_new_exec(void)
-{
- struct thread_info *ti = current_thread_info();
- struct mm_struct *mm = current->mm;
- unsigned long exec = 0x10000000;
-
- WARN_ON(irqs_disabled());
-
- /*
- * preload cache can only be used to determine whether a SLB
- * entry exists if it does not start to overflow.
- */
- if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
- return;
-
- hard_irq_disable();
-
- /*
- * We have no good place to clear the slb preload cache on exec,
- * flush_thread is about the earliest arch hook but that happens
- * after we switch to the mm and have already preloaded the SLBEs.
- *
- * For the most part that's probably okay to use entries from the
- * previous exec, they will age out if unused. It may turn out to
- * be an advantage to clear the cache before switching to it,
- * however.
- */
-
- /*
- * preload some userspace segments into the SLB.
- * Almost all 32 and 64bit PowerPC executables are linked at
- * 0x10000000 so it makes sense to preload this segment.
- */
- if (!is_kernel_addr(exec)) {
- if (preload_add(ti, exec))
- slb_allocate_user(mm, exec);
- }
-
- /* Libraries and mmaps. */
- if (!is_kernel_addr(mm->mmap_base)) {
- if (preload_add(ti, mm->mmap_base))
- slb_allocate_user(mm, mm->mmap_base);
- }
-
- /* see switch_slb */
- asm volatile("isync" : : : "memory");
-
- local_irq_enable();
-}
-
-void preload_new_slb_context(unsigned long start, unsigned long sp)
-{
- struct thread_info *ti = current_thread_info();
- struct mm_struct *mm = current->mm;
- unsigned long heap = mm->start_brk;
-
- WARN_ON(irqs_disabled());
-
- /* see above */
- if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
- return;
-
- hard_irq_disable();
-
- /* Userspace entry address. */
- if (!is_kernel_addr(start)) {
- if (preload_add(ti, start))
- slb_allocate_user(mm, start);
- }
-
- /* Top of stack, grows down. */
- if (!is_kernel_addr(sp)) {
- if (preload_add(ti, sp))
- slb_allocate_user(mm, sp);
- }
-
- /* Bottom of heap, grows up. */
- if (heap && !is_kernel_addr(heap)) {
- if (preload_add(ti, heap))
- slb_allocate_user(mm, heap);
- }
-
- /* see switch_slb */
- asm volatile("isync" : : : "memory");
-
- local_irq_enable();
-}
-
static void slb_cache_slbie_kernel(unsigned int index)
{
unsigned long slbie_data = get_paca()->slb_cache[index];
@@ -502,6 +424,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
copy_mm_to_paca(mm);
+ if (slb_preload_disabled())
+ return;
+
/*
* We gradually age out SLBs after a number of context switches to
* reduce reload overhead of unused entries (like we do with FP/VEC
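[Editor's note: the new behaviour is opt-in. Booting with no_slb_preload on the kernel command line enables the no_slb_preload_key static branch, after which preload_add() and switch_slb() skip the userspace SLB preload paths entirely; without the parameter the static branch stays false and the fast path is unchanged. Hypothetical boot arguments, for illustration:

	root=/dev/sda2 no_slb_preload]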
diff --git a/arch/powerpc/mm/ptdump/8xx.c b/arch/powerpc/mm/ptdump/8xx.c
index 4ca9cf7a90c9..ff845f251724 100644
--- a/arch/powerpc/mm/ptdump/8xx.c
+++ b/arch/powerpc/mm/ptdump/8xx.c
@@ -71,18 +71,23 @@ static const struct flag_info flag_array[] = {
struct ptdump_pg_level pg_level[5] = {
{ /* pgd */
+ .name = "PGD",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* p4d */
+ .name = "P4D",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pud */
+ .name = "PUD",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pmd */
+ .name = "PMD",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pte */
+ .name = "PTE",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
},
diff --git a/arch/powerpc/mm/ptdump/book3s64.c b/arch/powerpc/mm/ptdump/book3s64.c
index 6b2da9241d4c..e8a21c6dc32e 100644
--- a/arch/powerpc/mm/ptdump/book3s64.c
+++ b/arch/powerpc/mm/ptdump/book3s64.c
@@ -104,18 +104,23 @@ static const struct flag_info flag_array[] = {
struct ptdump_pg_level pg_level[5] = {
{ /* pgd */
+ .name = "PGD",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* p4d */
+ .name = "P4D",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pud */
+ .name = "PUD",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pmd */
+ .name = "PMD",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pte */
+ .name = "PTE",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
},
diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
index a6baa6166d94..671d0dc00c6d 100644
--- a/arch/powerpc/mm/ptdump/hashpagetable.c
+++ b/arch/powerpc/mm/ptdump/hashpagetable.c
@@ -216,6 +216,8 @@ static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64
vpn = hpt_vpn(ea, vsid, ssize);
hash = hpt_hash(vpn, shift, ssize);
want_v = hpte_encode_avpn(vpn, psize, ssize);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ want_v = hpte_old_to_new_v(want_v);
/* to check in the secondary hash table, we invert the hash */
if (!primary)
@@ -229,6 +231,10 @@ static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64
/* HPTE matches */
*v = be64_to_cpu(hptep->v);
*r = be64_to_cpu(hptep->r);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ *v = hpte_new_to_old_v(*v, *r);
+ *r = hpte_new_to_old_r(*r);
+ }
return 0;
}
++hpte_group;
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index b2358d794855..0d499aebee72 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -178,6 +178,7 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
pt_dump_size(st->seq, addr - st->start_address);
+ pt_dump_seq_printf(st->seq, "%s ", pg_level[st->level].name);
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
diff --git a/arch/powerpc/mm/ptdump/ptdump.h b/arch/powerpc/mm/ptdump/ptdump.h
index 4232aa4b57ea..12aa9eca8b0c 100644
--- a/arch/powerpc/mm/ptdump/ptdump.h
+++ b/arch/powerpc/mm/ptdump/ptdump.h
@@ -13,6 +13,7 @@ struct flag_info {
struct ptdump_pg_level {
const struct flag_info *flag;
+ char name[4];
size_t num;
u64 mask;
};
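[Editor's note: name[4] is sized for the three-character level labels ("PGD", "P4D", "PUD", "PMD", "PTE") plus the NUL terminator; the new pt_dump_seq_printf() call in dump_addr() appends this label to each ptdump output line.]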
diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c
index 58998960eb9a..edc69da19b85 100644
--- a/arch/powerpc/mm/ptdump/shared.c
+++ b/arch/powerpc/mm/ptdump/shared.c
@@ -69,18 +69,23 @@ static const struct flag_info flag_array[] = {
struct ptdump_pg_level pg_level[5] = {
{ /* pgd */
+ .name = "PGD",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* p4d */
+ .name = "P4D",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pud */
+ .name = "PUD",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pmd */
+ .name = "PMD",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pte */
+ .name = "PTE",
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
},