Diffstat (limited to 'arch/powerpc/mm/pgtable_32.c')
-rw-r--r--  arch/powerpc/mm/pgtable_32.c  129
1 file changed, 39 insertions(+), 90 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 6eb4eab79385..0c9ef705803e 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -23,6 +23,7 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/set_memory.h>
 
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
@@ -32,8 +33,6 @@
#include <mm/mmu_decl.h>
-extern char etext[], _stext[], _sinittext[], _einittext[];
-
static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;
notrace void __init early_ioremap_init(void)
@@ -49,15 +48,10 @@ notrace void __init early_ioremap_init(void)
early_ioremap_setup();
}
-static void __init *early_alloc_pgtable(unsigned long size)
+void __init *early_alloc_pgtable(unsigned long size)
{
- void *ptr = memblock_alloc(size, size);
-
- if (!ptr)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, size, size);
- return ptr;
+ return memblock_alloc_or_panic(size, size);
}
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
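
The hunk above folds the open-coded allocate-then-panic sequence into memblock_alloc_or_panic(), and drops the static qualifier from early_alloc_pgtable() so it can be called from outside this file. As a rough sketch of the helper's semantics, inferred from the removed lines rather than from the in-tree definition in include/linux/memblock.h (the real helper is expected to report its caller in the panic message, which is why it is sketched here as a macro rather than a function):

	/* Semantics sketch only, not the kernel's definition. */
	#define memblock_alloc_or_panic(size, align)				\
	({									\
		void *_ptr = memblock_alloc(size, align);			\
		if (!_ptr)							\
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",	\
			      __func__, (unsigned long)(size),			\
			      (unsigned long)(align));				\
		_ptr;								\
	})
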
@@ -84,7 +78,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
pg = pte_alloc_kernel(pd, va);
else
pg = early_pte_alloc_kernel(pd, va);
- if (pg != 0) {
+ if (pg) {
err = 0;
/* The PTE should never be already set nor present in the
* hash table
@@ -103,19 +97,14 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
unsigned long v, s;
phys_addr_t p;
- int ktext;
+ bool ktext;
 
s = offset;
v = PAGE_OFFSET + s;
p = memstart_addr + s;
for (; s < top; s += PAGE_SIZE) {
- ktext = ((char *)v >= _stext && (char *)v < etext) ||
- ((char *)v >= _sinittext && (char *)v < _einittext);
- map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
-#ifdef CONFIG_PPC_BOOK3S_32
- if (ktext)
- hash_preload(&init_mm, v);
-#endif
+ ktext = core_kernel_text(v);
+ map_kernel_page(v, p, ktext ? PAGE_KERNEL_X : PAGE_KERNEL);
v += PAGE_SIZE;
p += PAGE_SIZE;
}
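
Here the open-coded text-range test becomes core_kernel_text(), the mapping protection changes from PAGE_KERNEL_TEXT to PAGE_KERNEL_X, and the CONFIG_PPC_BOOK3S_32 hash_preload() call goes away. A hedged sketch of the check core_kernel_text() stands in for, mirroring the removed lines (the real helper lives in kernel/extable.c and additionally stops counting init text once initmem is freed):

	/* Roughly the test the removed lines performed on a kernel virtual address v. */
	static bool ktext_like(unsigned long v)
	{
		return ((char *)v >= _stext && (char *)v < etext) ||
		       ((char *)v >= _sinittext && (char *)v < _einittext);
	}
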
@@ -123,11 +112,11 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
void __init mapin_ram(void)
{
- struct memblock_region *reg;
+ phys_addr_t base, end;
+ u64 i;
 
- for_each_memblock(memory, reg) {
- phys_addr_t base = reg->base;
- phys_addr_t top = min(base + reg->size, total_lowmem);
+ for_each_mem_range(i, &base, &end) {
+ phys_addr_t top = min(end, total_lowmem);
if (base >= top)
continue;
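
mapin_ram() now walks physical memory with for_each_mem_range(), which yields each registered [base, end) span directly, instead of dereferencing struct memblock_region through the old for_each_memblock(). A minimal usage sketch (the u64 cursor is required by the iterator; the printout is illustrative only):

	u64 i;
	phys_addr_t base, end;

	for_each_mem_range(i, &base, &end)
		pr_info("memory range: %pa..%pa\n", &base, &end);
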
@@ -136,98 +125,58 @@ void __init mapin_ram(void)
}
}
-static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
+static int __mark_initmem_nx(void)
{
- pte_t *kpte;
- unsigned long address;
-
- BUG_ON(PageHighMem(page));
- address = (unsigned long)page_address(page);
-
- if (v_block_mapped(address))
- return 0;
- kpte = virt_to_kpte(address);
- if (!kpte)
- return -EINVAL;
- __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
-
- return 0;
-}
+ unsigned long numpages = PFN_UP((unsigned long)_einittext) -
+ PFN_DOWN((unsigned long)_sinittext);
+ int err;
 
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
- */
-static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
- int i, err = 0;
- unsigned long flags;
- struct page *start = page;
+ err = mmu_mark_initmem_nx();
 
- local_irq_save(flags);
- for (i = 0; i < numpages; i++, page++) {
- err = __change_page_attr_noflush(page, prot);
+ if (!v_block_mapped((unsigned long)_sinittext)) {
+ err = set_memory_nx((unsigned long)_sinittext, numpages);
if (err)
- break;
+ return err;
+ err = set_memory_rw((unsigned long)_sinittext, numpages);
}
- wmb();
- local_irq_restore(flags);
- flush_tlb_kernel_range((unsigned long)page_address(start),
- (unsigned long)page_address(page));
return err;
}
void mark_initmem_nx(void)
{
- struct page *page = virt_to_page(_sinittext);
- unsigned long numpages = PFN_UP((unsigned long)_einittext) -
- PFN_DOWN((unsigned long)_sinittext);
+ int err = __mark_initmem_nx();
 
- if (v_block_mapped((unsigned long)_sinittext))
- mmu_mark_initmem_nx();
- else
- change_page_attr(page, numpages, PAGE_KERNEL);
+ if (err)
+ panic("%s() failed, err = %d\n", __func__, err);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void mark_rodata_ro(void)
+static int __mark_rodata_ro(void)
{
- struct page *page;
unsigned long numpages;
- if (v_block_mapped((unsigned long)_stext + 1)) {
- mmu_mark_rodata_ro();
- ptdump_check_wx();
- return;
- }
+ if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");
 
- page = virt_to_page(_stext);
- numpages = PFN_UP((unsigned long)_etext) -
- PFN_DOWN((unsigned long)_stext);
+ if (v_block_mapped((unsigned long)_stext + 1))
+ return mmu_mark_rodata_ro();
 
- change_page_attr(page, numpages, PAGE_KERNEL_ROX);
/*
- * mark .rodata as read only. Use __init_begin rather than __end_rodata
- * to cover NOTES and EXCEPTION_TABLE.
+ * mark text and rodata as read only. __end_rodata is set by
+ * powerpc's linker script and includes tables and data
+ * requiring relocation which are not put in RO_DATA.
*/
- page = virt_to_page(__start_rodata);
- numpages = PFN_UP((unsigned long)__init_begin) -
- PFN_DOWN((unsigned long)__start_rodata);
-
- change_page_attr(page, numpages, PAGE_KERNEL_RO);
+ numpages = PFN_UP((unsigned long)__end_rodata) -
+ PFN_DOWN((unsigned long)_stext);
 
- // mark_initmem_nx() should have already run by now
- ptdump_check_wx();
+ return set_memory_ro((unsigned long)_stext, numpages);
}
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void mark_rodata_ro(void)
{
- if (PageHighMem(page))
- return;
+ int err = __mark_rodata_ro();
 
- change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+ if (err)
+ panic("%s() failed, err = %d\n", __func__, err);
}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif
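
The final hunk retires the homegrown change_page_attr()/__change_page_attr_noflush() machinery, along with its CONFIG_DEBUG_PAGEALLOC __kernel_map_pages() user in this file, in favour of the generic set_memory API; that is what the new <linux/set_memory.h> include at the top of the patch is for. The calls used here follow the kernel-wide shape, where numpages counts PAGE_SIZE pages starting at a page-aligned addr:

	int set_memory_ro(unsigned long addr, int numpages);
	int set_memory_rw(unsigned long addr, int numpages);
	int set_memory_nx(unsigned long addr, int numpages);

The PFN_UP()/PFN_DOWN() pairs compute numpages so that every page the symbol span touches is covered. A worked example with 4 KiB pages and made-up addresses:

	/* hypothetical: _sinittext = 0xc0001234, _einittext = 0xc0004800 */
	PFN_DOWN(0xc0001234) == 0xc0001	/* start rounded down to a page frame */
	PFN_UP(0xc0004800)   == 0xc0005	/* end rounded up */
	/* numpages = 0xc0005 - 0xc0001 = 4 pages to protect */

Both wrappers, mark_initmem_nx() and mark_rodata_ro(), now panic() if the protection change fails rather than silently ignoring the error.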