Diffstat (limited to 'arch/powerpc/mm/pgtable_32.c')
-rw-r--r-- | arch/powerpc/mm/pgtable_32.c | 178
1 files changed, 54 insertions, 124 deletions
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 73b84166d06a..15276068f657 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -23,29 +23,38 @@
 #include <linux/highmem.h>
 #include <linux/memblock.h>
 #include <linux/slab.h>
+#include <linux/set_memory.h>
 
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
+#include <asm/early_ioremap.h>
 
 #include <mm/mmu_decl.h>
 
-extern char etext[], _stext[], _sinittext[], _einittext[];
+static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;
 
-static void __init *early_alloc_pgtable(unsigned long size)
+notrace void __init early_ioremap_init(void)
 {
-	void *ptr = memblock_alloc(size, size);
+	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
+	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
+	pmd_t *pmdp = pmd_off_k(addr);
 
-	if (!ptr)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, size, size);
+	for (; (s32)(FIXADDR_TOP - addr) > 0;
+	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
+		pmd_populate_kernel(&init_mm, pmdp, ptep);
+
+	early_ioremap_setup();
+}
+
+void __init *early_alloc_pgtable(unsigned long size)
+{
+	return memblock_alloc_or_panic(size, size);
 
-	return ptr;
 }
 
-static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
 {
 	if (pmd_none(*pmdp)) {
 		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
@@ -63,13 +72,13 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 	int err = -ENOMEM;
 
 	/* Use upper 10 bits of VA to index the first level map */
-	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
+	pd = pmd_off_k(va);
 	/* Use middle 10 bits of VA to index the second-level map */
 	if (likely(slab_is_available()))
 		pg = pte_alloc_kernel(pd, va);
 	else
 		pg = early_pte_alloc_kernel(pd, va);
-	if (pg != 0) {
+	if (pg) {
 		err = 0;
 		/* The PTE should never be already set nor present in the
 		 * hash table
@@ -88,19 +97,14 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 {
 	unsigned long v, s;
 	phys_addr_t p;
-	int ktext;
+	bool ktext;
 
 	s = offset;
 	v = PAGE_OFFSET + s;
 	p = memstart_addr + s;
 	for (; s < top; s += PAGE_SIZE) {
-		ktext = ((char *)v >= _stext && (char *)v < etext) ||
-			((char *)v >= _sinittext && (char *)v < _einittext);
+		ktext = core_kernel_text(v);
 		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
-#ifdef CONFIG_PPC_BOOK3S_32
-		if (ktext)
-			hash_preload(&init_mm, v);
-#endif
 		v += PAGE_SIZE;
 		p += PAGE_SIZE;
 	}
@@ -108,11 +112,11 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 
 void __init mapin_ram(void)
 {
-	struct memblock_region *reg;
+	phys_addr_t base, end;
+	u64 i;
 
-	for_each_memblock(memory, reg) {
-		phys_addr_t base = reg->base;
-		phys_addr_t top = min(base + reg->size, total_lowmem);
+	for_each_mem_range(i, &base, &end) {
+		phys_addr_t top = min(end, total_lowmem);
 
 		if (base >= top)
 			continue;
@@ -121,132 +125,58 @@ void __init mapin_ram(void)
 	}
 }
 
-/* Scan the real Linux page tables and return a PTE pointer for
- * a virtual address in a context.
- * Returns true (1) if PTE was found, zero otherwise. The pointer to
- * the PTE pointer is unmodified if PTE is not found.
- */
-static int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
+static int __mark_initmem_nx(void)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	int retval = 0;
-
-	pgd = pgd_offset(mm, addr & PAGE_MASK);
-	if (pgd) {
-		pud = pud_offset(pgd, addr & PAGE_MASK);
-		if (pud && pud_present(*pud)) {
-			pmd = pmd_offset(pud, addr & PAGE_MASK);
-			if (pmd_present(*pmd)) {
-				pte = pte_offset_map(pmd, addr & PAGE_MASK);
-				if (pte) {
-					retval = 1;
-					*ptep = pte;
-					if (pmdp)
-						*pmdp = pmd;
-					/* XXX caller needs to do pte_unmap, yuck */
-				}
-			}
-		}
-	}
-	return(retval);
-}
-
-static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
-{
-	pte_t *kpte;
-	pmd_t *kpmd;
-	unsigned long address;
-
-	BUG_ON(PageHighMem(page));
-	address = (unsigned long)page_address(page);
-
-	if (v_block_mapped(address))
-		return 0;
-	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
-		return -EINVAL;
-	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
-	pte_unmap(kpte);
+	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
+				 PFN_DOWN((unsigned long)_sinittext);
+	int err;
 
-	return 0;
-}
+	err = mmu_mark_initmem_nx();
 
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
- */
-static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
-	int i, err = 0;
-	unsigned long flags;
-	struct page *start = page;
-
-	local_irq_save(flags);
-	for (i = 0; i < numpages; i++, page++) {
-		err = __change_page_attr_noflush(page, prot);
+	if (!v_block_mapped((unsigned long)_sinittext)) {
+		err = set_memory_nx((unsigned long)_sinittext, numpages);
 		if (err)
-			break;
+			return err;
+		err = set_memory_rw((unsigned long)_sinittext, numpages);
 	}
-	wmb();
-	local_irq_restore(flags);
-	flush_tlb_kernel_range((unsigned long)page_address(start),
-			       (unsigned long)page_address(page));
 	return err;
 }
 
 void mark_initmem_nx(void)
 {
-	struct page *page = virt_to_page(_sinittext);
-	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
-				 PFN_DOWN((unsigned long)_sinittext);
+	int err = __mark_initmem_nx();
 
-	if (v_block_mapped((unsigned long)_stext + 1))
-		mmu_mark_initmem_nx();
-	else
-		change_page_attr(page, numpages, PAGE_KERNEL);
+	if (err)
+		panic("%s() failed, err = %d\n", __func__, err);
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-void mark_rodata_ro(void)
+static int __mark_rodata_ro(void)
 {
-	struct page *page;
 	unsigned long numpages;
 
-	if (v_block_mapped((unsigned long)_sinittext)) {
-		mmu_mark_rodata_ro();
-		return;
-	}
+	if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
+		pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");
 
-	page = virt_to_page(_stext);
-	numpages = PFN_UP((unsigned long)_etext) -
-		   PFN_DOWN((unsigned long)_stext);
+	if (v_block_mapped((unsigned long)_stext + 1))
+		return mmu_mark_rodata_ro();
 
-	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
 	/*
-	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
-	 * to cover NOTES and EXCEPTION_TABLE.
+	 * mark text and rodata as read only. __end_rodata is set by
+	 * powerpc's linker script and includes tables and data
	 * requiring relocation which are not put in RO_DATA.
 	 */
-	page = virt_to_page(__start_rodata);
-	numpages = PFN_UP((unsigned long)__init_begin) -
-		   PFN_DOWN((unsigned long)__start_rodata);
-
-	change_page_attr(page, numpages, PAGE_KERNEL_RO);
+	numpages = PFN_UP((unsigned long)__end_rodata) -
+		   PFN_DOWN((unsigned long)_stext);
 
-	// mark_initmem_nx() should have already run by now
-	ptdump_check_wx();
+	return set_memory_ro((unsigned long)_stext, numpages);
 }
-#endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void mark_rodata_ro(void)
 {
-	if (PageHighMem(page))
-		return;
+	int err = __mark_rodata_ro();
 
-	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+	if (err)
+		panic("%s() failed, err = %d\n", __func__, err);
 }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif
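For orientation, a minimal kernel-context sketch of the pattern this diff moves to: compute a page count with PFN_UP()/PFN_DOWN() and hand the range to the generic set_memory_*() helpers from <linux/set_memory.h>, rather than walking PTEs by hand as the removed change_page_attr() did. The example_protect_range() helper and its range arguments are illustrative assumptions, not code from this diff.

```c
/*
 * Illustrative sketch only (not part of the diff above): protect an
 * arbitrary kernel virtual range via the generic set_memory API,
 * mirroring the __mark_initmem_nx()/__mark_rodata_ro() pattern.
 */
#include <linux/kernel.h>
#include <linux/pfn.h>		/* PFN_UP(), PFN_DOWN() */
#include <linux/set_memory.h>	/* set_memory_ro(), set_memory_nx() */

static int __init example_protect_range(unsigned long start, unsigned long end)
{
	/* Round outward so partially covered pages are included. */
	unsigned long numpages = PFN_UP(end) - PFN_DOWN(start);
	int err;

	err = set_memory_ro(start, numpages);	/* drop write permission */
	if (err)
		return err;

	return set_memory_nx(start, numpages);	/* drop execute permission */
}
```

With this pattern the arch's set_memory implementation is expected to take care of the PTE updates and TLB maintenance, and the callers in the diff (mark_initmem_nx()/mark_rodata_ro()) now panic on failure instead of silently ignoring errors.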