From 9d9f2cccde952126185e3336af0d4dc62eb254ad Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 29 Mar 2019 09:59:59 +0000
Subject: powerpc/mm: change #include "mmu_decl.h" to <mm/mmu_decl.h>

This patch makes inclusion of mmu_decl.h independent of the location
of the file including it.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/pgtable_32.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/mm/pgtable_32.c')

diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 6e56a6240bfa..c9cdbb84d31f 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -36,7 +36,7 @@
 #include
 #include

-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>

 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);     /* aka VMALLOC_END */
--
cgit

From 4a6d8cf90017019f3b2829b38157cd1a74c64856 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 26 Apr 2019 15:58:06 +0000
Subject: powerpc/mm: don't use pte_alloc_kernel() until slab is available on PPC32

In the same way as PPC64, implement early allocation functions and
avoid calling pte_alloc_kernel() before slab is available.

Reviewed-by: Aneesh Kumar K.V
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/pgtable_32.c | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

(limited to 'arch/powerpc/mm/pgtable_32.c')

diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index c9cdbb84d31f..e54b612cbc98 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -43,11 +43,8 @@ EXPORT_SYMBOL(ioremap_bot);     /* aka VMALLOC_END */

 extern char etext[], _stext[], _sinittext[], _einittext[];

-__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-        if (!slab_is_available())
-                return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
-
         return (pte_t *)pte_fragment_alloc(mm, 1);
 }

@@ -205,7 +202,29 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);

-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
+static void __init *early_alloc_pgtable(unsigned long size)
+{
+        void *ptr = memblock_alloc(size, size);
+
+        if (!ptr)
+                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+                      __func__, size, size);
+
+        return ptr;
+}
+
+static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+{
+        if (pmd_none(*pmdp)) {
+                pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
+
+                pmd_populate_kernel(&init_mm, pmdp, ptep);
+        }
+        return pte_offset_kernel(pmdp, va);
+}
+
+
+int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 {
         pmd_t *pd;
         pte_t *pg;
@@ -214,7 +233,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
         /* Use upper 10 bits of VA to index the first level map */
         pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
         /* Use middle 10 bits of VA to index the second-level map */
-        pg = pte_alloc_kernel(pd, va);
+        if (likely(slab_is_available()))
+                pg = pte_alloc_kernel(pd, va);
+        else
+                pg = early_pte_alloc_kernel(pd, va);
         if (pg != 0) {
                 err = 0;
                 /* The PTE should never be already set nor present in the
--
cgit

From b0124ff57e9405725b4dfeffbdfa929bb973ad2c Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 26 Apr 2019 15:58:07 +0000
Subject: powerpc/mm: inline pte_alloc_one_kernel() and pte_alloc_one() on PPC32

pte_alloc_one_kernel() and pte_alloc_one() are simple calls to
pte_fragment_alloc(), so they are good candidates for inlining, as
already done on PPC64.

Reviewed-by: Aneesh Kumar K.V
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/pgtable_32.c | 10 ----------
 1 file changed, 10 deletions(-)

(limited to 'arch/powerpc/mm/pgtable_32.c')

diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index e54b612cbc98..2e67f9a1430b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -43,16 +43,6 @@ EXPORT_SYMBOL(ioremap_bot);     /* aka VMALLOC_END */

 extern char etext[], _stext[], _sinittext[], _einittext[];

-pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
-        return (pte_t *)pte_fragment_alloc(mm, 1);
-}
-
-pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
-        return (pgtable_t)pte_fragment_alloc(mm, 0);
-}
-
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
--
cgit
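
The first patch above relies on standard C include-search semantics rather than anything kernel-specific: a quoted include is looked up relative to the directory of the including file first, while an angle-bracket include is resolved only against the configured include paths. A minimal sketch of the difference, assuming a hypothetical cc -Iarch/powerpc flag (the actual kbuild flags are outside this log):

/*
 * Quote form: searched relative to the including file's directory
 * first, so this line only resolves from within arch/powerpc/mm/.
 */
#include "mmu_decl.h"

/*
 * Angle-bracket form: resolved only against the compiler's include
 * paths, e.g. cc -Iarch/powerpc, so the same line finds
 * arch/powerpc/mm/mmu_decl.h from any file in the tree.
 */
#include <mm/mmu_decl.h>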
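
The second patch is an instance of a common bootstrap pattern: route allocations through a fatal-on-failure early allocator until the general-purpose allocator is initialised, then switch at the call site, as map_kernel_page() does with slab_is_available(). A self-contained userspace C sketch of the same shape; every name in it (boot_alloc, pgtable_alloc, slab_up, POOL_SIZE) is invented for illustration and is not kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POOL_SIZE 4096

static unsigned char boot_pool[POOL_SIZE]; /* static early-boot arena */
static size_t boot_used;
static int slab_up;  /* flips once the "real" allocator is ready */

/* Early allocator: carve naturally-aligned chunks out of a static
 * pool and treat failure as fatal, mirroring early_alloc_pgtable()'s
 * panic(). size must be a power of two. */
static void *boot_alloc(size_t size)
{
        size_t aligned = (boot_used + size - 1) & ~(size - 1);

        if (aligned + size > POOL_SIZE) {
                fprintf(stderr, "%s: failed to allocate %zu bytes\n",
                        __func__, size);
                exit(1);
        }
        boot_used = aligned + size;
        return memset(boot_pool + aligned, 0, size);
}

/* Call sites pick an allocator the way map_kernel_page() does: the
 * normal path once the allocator is up, the early path before that. */
static void *pgtable_alloc(size_t size)
{
        return slab_up ? malloc(size) : boot_alloc(size);
}

int main(void)
{
        void *early = pgtable_alloc(256);  /* served from the pool */

        slab_up = 1;
        void *late = pgtable_alloc(256);   /* served by malloc() */

        printf("early=%p late=%p\n", early, late);
        free(late);
        return 0;
}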
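
The third patch shows only deletions because this cgit view is limited to pgtable_32.c; the same commit presumably re-adds the two helpers as static inlines in the 32-bit pgalloc headers, along the lines of the sketch below, so each caller gets a direct pte_fragment_alloc() call instead of a call through pgtable_32.o:

/* Sketch of the header-side counterpart (not shown in this view). */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
        return (pte_t *)pte_fragment_alloc(mm, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
        return (pgtable_t)pte_fragment_alloc(mm, 0);
}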
From 453d87f6a8aed827f5ebb1708a4cea458fd68d23 Mon Sep 17 00:00:00 2001
From: Russell Currey
Date: Thu, 2 May 2019 17:39:47 +1000
Subject: powerpc/mm: Warn if W+X pages found on boot

Implement code to walk all pages and warn if any are found to be both
writable and executable. Depends on STRICT_KERNEL_RWX being enabled,
and is behind the DEBUG_WX config option. This only runs on boot and
has no runtime performance implications.

Very heavily influenced (and in some cases copied verbatim) from the
ARM64 code written by Laura Abbott (thanks!), since our ptdump
infrastructure is similar.

Signed-off-by: Russell Currey
[mpe: Fixup build error when disabled]
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/pgtable_32.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/powerpc/mm/pgtable_32.c')

diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 2e67f9a1430b..16ada373b32b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -396,6 +396,9 @@ void mark_rodata_ro(void)
                    PFN_DOWN((unsigned long)__start_rodata);

         change_page_attr(page, numpages, PAGE_KERNEL_RO);
+
+        // mark_initmem_nx() should have already run by now
+        ptdump_check_wx();
 }
 #endif
--
cgit
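
At its core, the check the last patch hooks into mark_rodata_ro() is a walk over kernel mappings that flags any page that is simultaneously writable and executable. A toy, self-contained C sketch of that invariant check, with a flat array of invented page_attr records standing in for the kernel's real ptdump page-table walk:

#include <stdbool.h>
#include <stdio.h>

/* Toy page attributes standing in for PTE permission bits. */
struct page_attr {
        unsigned long addr;
        bool write;
        bool exec;
};

/* Walk all mappings and warn about any that are both writable and
 * executable, the invariant ptdump_check_wx() enforces at boot. */
static void check_wx(const struct page_attr *pages, size_t n)
{
        size_t wx = 0;

        for (size_t i = 0; i < n; i++) {
                if (pages[i].write && pages[i].exec) {
                        fprintf(stderr, "W+X mapping at 0x%lx\n",
                                pages[i].addr);
                        wx++;
                }
        }

        if (wx)
                fprintf(stderr, "checked, found %zu W+X pages\n", wx);
        else
                printf("checked, no W+X pages found\n");
}

int main(void)
{
        struct page_attr pages[] = {
                { 0xc0000000, false, true  },  /* kernel text: r-x */
                { 0xc0400000, true,  false },  /* kernel data: rw- */
                { 0xc0800000, true,  true  },  /* bad: rwx */
        };

        check_wx(pages, sizeof(pages) / sizeof(pages[0]));
        return 0;
}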