Diffstat (limited to 'arch/powerpc/mm/nohash/8xx.c')
-rw-r--r--	arch/powerpc/mm/nohash/8xx.c	295
1 file changed, 114 insertions(+), 181 deletions(-)
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 090af2d2d3e4..6be6421086ed 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -8,28 +8,24 @@
*/
#include <linux/memblock.h>
-#include <linux/mmu_context.h>
+#include <linux/hugetlb.h>
+
#include <asm/fixmap.h>
-#include <asm/code-patching.h>
#include <mm/mmu_decl.h>
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
-extern int __map_without_ltlbs;
-
static unsigned long block_mapped_ram;
/*
- * Return PA for this VA if it is in an area mapped with LTLBs.
+ * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
* Otherwise, returns 0
*/
phys_addr_t v_block_mapped(unsigned long va)
{
unsigned long p = PHYS_IMMR_BASE;
- if (__map_without_ltlbs)
- return 0;
if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
return p + va - VIRT_IMMR_BASE;
if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
@@ -38,14 +34,13 @@ phys_addr_t v_block_mapped(unsigned long va)
}
/*
- * Return VA for a given PA mapped with LTLBs or 0 if not mapped
+ * Return VA for a given PA mapped with LTLBs or fixmap
+ * Return 0 if not mapped
*/
unsigned long p_block_mapped(phys_addr_t pa)
{
unsigned long p = PHYS_IMMR_BASE;
- if (__map_without_ltlbs)
- return 0;
if (pa >= p && pa < p + IMMR_SIZE)
return VIRT_IMMR_BASE + pa - p;
if (pa < block_mapped_ram)
@@ -53,155 +48,145 @@ unsigned long p_block_mapped(phys_addr_t pa)
return 0;
}
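With __map_without_ltlbs gone, the two lookup helpers reduce to pure address arithmetic over the IMMR window and the block-mapped part of the linear map. A minimal user-space sketch of that round-trip follows; PAGE_OFFSET, the IMMR bases, block_mapped_ram and the open-coded __pa()/__va() arithmetic are illustrative assumptions, not the kernel's actual values.

#include <assert.h>
#include <stdio.h>

/* Illustrative constants only, not the kernel's actual layout. */
#define PAGE_OFFSET	0xc0000000UL
#define PHYS_IMMR_BASE	0xff000000UL
#define VIRT_IMMR_BASE	0xfef00000UL
#define IMMR_SIZE	0x00080000UL

static unsigned long block_mapped_ram = 0x01000000UL;	/* first 16M, say */

static unsigned long v_block_mapped(unsigned long va)
{
	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return PHYS_IMMR_BASE + va - VIRT_IMMR_BASE;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return va - PAGE_OFFSET;	/* __pa() on the linear map */
	return 0;
}

static unsigned long p_block_mapped(unsigned long pa)
{
	if (pa >= PHYS_IMMR_BASE && pa < PHYS_IMMR_BASE + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - PHYS_IMMR_BASE;
	if (pa < block_mapped_ram)
		return PAGE_OFFSET + pa;	/* __va() */
	return 0;
}

int main(void)
{
	unsigned long va = VIRT_IMMR_BASE + 0x400;

	/* Both windows round-trip; anything outside them yields 0. */
	assert(p_block_mapped(v_block_mapped(va)) == va);
	assert(v_block_mapped(0x10000000UL) == 0);
	printf("IMMR va 0x%lx <-> pa 0x%lx\n", va, v_block_mapped(va));
	return 0;
}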
-#define LARGE_PAGE_SIZE_8M (1<<23)
-
-/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
- */
-void __init MMU_init_hw(void)
+static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
- /* PIN up to the 3 first 8Mb after IMMR in DTLB table */
- if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) {
- unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
- unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
- int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28;
- unsigned long addr = 0;
- unsigned long mem = total_lowmem;
-
- for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
- mtspr(SPRN_MD_CTR, ctr | (i << 8));
- mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
- mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
- mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
- addr += LARGE_PAGE_SIZE_8M;
- mem -= LARGE_PAGE_SIZE_8M;
- }
+ if (hpd_val(*pmdp) == 0) {
+ pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);
+
+ if (!ptep)
+ return NULL;
+
+ hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
+ hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
}
+ return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}
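An 8M huge page spans two consecutive PGD entries, which is why hugepd_populate_kernel() is called on both pmdp and pmdp + 1 with the same PTE table. A toy model of that double population, assuming each PGD entry covers 4M (the slot size and toy directory are illustrative):

#include <assert.h>
#include <stdio.h>

#define PGDIR_SIZE	0x400000UL	/* assumption: 4M per PGD entry */
#define SZ_8M		0x800000UL

static void *pgd[1024];		/* toy top-level directory */

/* Mirrors the pair of hugepd_populate_kernel() calls above. */
static void populate_8m(unsigned long va, void *hugepte_table)
{
	unsigned long idx = va / PGDIR_SIZE;

	pgd[idx] = hugepte_table;
	pgd[idx + 1] = hugepte_table;
}

int main(void)
{
	static long table;	/* stand-in for the memblock-allocated PTE */

	populate_8m(2 * SZ_8M, &table);

	/* Both 4M slots under the 8M page point at the same table. */
	assert(pgd[4] == pgd[5] && pgd[4] == &table);
	printf("slots 4 and 5 share one huge PTE table\n");
	return 0;
}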
-static void __init mmu_mapin_immr(void)
+static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
+ pgprot_t prot, int psize, bool new)
{
- unsigned long p = PHYS_IMMR_BASE;
- unsigned long v = VIRT_IMMR_BASE;
- int offset;
+ pmd_t *pmdp = pmd_off_k(va);
+ pte_t *ptep;
+
+ if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
+ return -EINVAL;
+
+ if (new) {
+ if (WARN_ON(slab_is_available()))
+ return -EINVAL;
+
+ if (psize == MMU_PAGE_512K)
+ ptep = early_pte_alloc_kernel(pmdp, va);
+ else
+ ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
+ } else {
+ if (psize == MMU_PAGE_512K)
+ ptep = pte_offset_kernel(pmdp, va);
+ else
+ ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
+ }
+
+ if (WARN_ON(!ptep))
+ return -ENOMEM;
- for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
- map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
+ /* The PTE should never be already present */
+ if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
+ return -EINVAL;
+
+ set_huge_pte_at(&init_mm, va, ptep,
+ pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), psize);
+
+ return 0;
}
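The mapper accepts only the two block sizes used here (512K and 8M) and takes a different path depending on whether page tables must still be allocated from memblock (new) or merely looked up for a later protection change. A compact model of that four-way dispatch; the returned strings simply name the helper the kernel code above would call:

#include <stdio.h>

enum psize { MMU_PAGE_512K, MMU_PAGE_8M, MMU_PAGE_OTHER };

/* Models the dispatch in __early_map_kernel_hugepage(). */
static const char *map_path(enum psize psize, int new)
{
	if (psize != MMU_PAGE_512K && psize != MMU_PAGE_8M)
		return "-EINVAL: unsupported block size";
	if (new)
		return psize == MMU_PAGE_512K ? "early_pte_alloc_kernel"
					      : "early_hugepd_alloc_kernel";
	return psize == MMU_PAGE_512K ? "pte_offset_kernel"
				      : "hugepte_offset";
}

int main(void)
{
	printf("new 512K  -> %s\n", map_path(MMU_PAGE_512K, 1));
	printf("new 8M    -> %s\n", map_path(MMU_PAGE_8M, 1));
	printf("remap 8M  -> %s\n", map_path(MMU_PAGE_8M, 0));
	printf("bad size  -> %s\n", map_path(MMU_PAGE_OTHER, 1));
	return 0;
}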
-static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
{
- modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}
-static void mmu_patch_addis(s32 *site, long simm)
+static bool immr_is_mapped __initdata;
+
+void __init mmu_mapin_immr(void)
{
- unsigned int instr = *(unsigned int *)patch_site_addr(site);
+ if (immr_is_mapped)
+ return;
+
+ immr_is_mapped = true;
- instr &= 0xffff0000;
- instr |= ((unsigned long)simm) >> 16;
- patch_instruction_site(site, instr);
+ __early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
+ PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}
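mmu_mapin_immr() is now idempotent: several early callers may reach it, and only the first call installs the single 512K uncached (PAGE_KERNEL_NCG) mapping, replacing the old per-4K-page loop. A trivial model of the guard:

#include <stdbool.h>
#include <stdio.h>

static bool immr_is_mapped;
static int map_count;

/* Repeated calls map only once, like the guard above. */
static void mmu_mapin_immr(void)
{
	if (immr_is_mapped)
		return;
	immr_is_mapped = true;
	map_count++;	/* stand-in for the single 512K huge mapping */
}

int main(void)
{
	mmu_mapin_immr();	/* first early caller */
	mmu_mapin_immr();	/* again, e.g. from mmu_mapin_ram() */
	printf("mapped %d time(s)\n", map_count);	/* prints 1 */
	return 0;
}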
-void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
+static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
+ pgprot_t prot, bool new)
{
- unsigned long s = offset;
- unsigned long v = PAGE_OFFSET + s;
- phys_addr_t p = memstart_addr + s;
-
- for (; s < top; s += PAGE_SIZE) {
- map_kernel_page(v, p, prot);
- v += PAGE_SIZE;
- p += PAGE_SIZE;
- }
+ unsigned long v = PAGE_OFFSET + offset;
+ unsigned long p = offset;
+
+ WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
+
+ for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+ for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
+ for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+
+ if (!new)
+ flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
}
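The chunk mapper carves [offset, top) into 512K pieces up to the first 8M boundary, whole 8M pages in the middle, and 512K pieces for the tail, hence the WARN_ON requiring 512K alignment of both bounds. Note that after the loops v already equals PAGE_OFFSET + top, so the start address handed to flush_tlb_kernel_range() looks doubled. A stand-alone sketch of just the size arithmetic; ALIGN_UP/ALIGN_DOWN mimic the kernel's ALIGN/ALIGN_DOWN and the example range is arbitrary:

#include <stdio.h>

#define SZ_512K	0x080000UL
#define SZ_8M	0x800000UL

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

/* Mirrors the three loops in mmu_mapin_ram_chunk(). */
static void plan_chunks(unsigned long offset, unsigned long top)
{
	unsigned long p = offset;
	int head = 0, mid = 0, tail = 0;

	for (; p < ALIGN_UP(p, SZ_8M) && p < top; p += SZ_512K)
		head++;	/* 512K steps up to the first 8M boundary */
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M)
		mid++;	/* whole 8M pages */
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K)
		tail++;	/* 512K steps for the remainder */

	printf("[0x%lx, 0x%lx): %d x 512K head, %d x 8M, %d x 512K tail\n",
	       offset, top, head, mid, tail);
}

int main(void)
{
	/* prints: [0x80000, 0x1b00000): 15 x 512K head, 2 x 8M, 6 x 512K tail */
	plan_chunks(SZ_512K, 27 * 0x100000UL);
	return 0;
}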
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
- unsigned long mapped;
-
- if (__map_without_ltlbs) {
- mapped = 0;
- mmu_mapin_immr();
- if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
- patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
- if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
- mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
+ unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+ unsigned long sinittext = __pa(_sinittext);
+ bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
+ unsigned long boundary = strict_boundary ? sinittext : etext8;
+ unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+ WARN_ON(top < einittext8);
+
+ mmu_mapin_immr();
+
+ mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
+ if (debug_pagealloc_enabled_or_kfence()) {
+ top = boundary;
} else {
- unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
-
- mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
- if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
- mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, einittext8);
-
- /*
- * Populate page tables to:
- * - have them appear in /sys/kernel/debug/kernel_page_tables
- * - allow the BDI to find the pages when they are not PINNED
- */
- mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X);
- mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL);
- mmu_mapin_immr();
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
+ mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
}
- mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
- mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);
-
- /* If the size of RAM is not an exact power of two, we may not
- * have covered RAM in its entirety with 8 MiB
- * pages. Consequently, restrict the top end of RAM currently
- * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
- * coverage with normal-sized pages (or other reasons) do not
- * attempt to allocate outside the allowed range.
- */
- if (mapped)
- memblock_set_current_limit(mapped);
+ if (top > SZ_32M)
+ memblock_set_current_limit(top);
- block_mapped_ram = mapped;
+ block_mapped_ram = top;
- return mapped;
+ return top;
}
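The split point between text and data mappings depends on the protection mode: with STRICT_KERNEL_RWX, DEBUG_PAGEALLOC or KFENCE the boundary must sit exactly at _sinittext, otherwise the text can be rounded up to a whole number of 8M pages. A sketch of that choice with made-up addresses; text_boundary() is a hypothetical helper for illustration only:

#include <stdbool.h>
#include <stdio.h>

#define SZ_8M	0x800000UL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Models the boundary choice in mmu_mapin_ram(). */
static unsigned long text_boundary(unsigned long etext,
				   unsigned long sinittext, bool strict)
{
	/* strict: exact RW/RX split; relaxed: whole 8M pages of text */
	return strict ? sinittext : ALIGN_UP(etext, SZ_8M);
}

int main(void)
{
	unsigned long etext = 0x5a0000;		/* example _etext */
	unsigned long sinittext = 0x700000;	/* example _sinittext */

	printf("strict  boundary: 0x%lx\n",
	       text_boundary(etext, sinittext, true));	/* 0x700000 */
	printf("relaxed boundary: 0x%lx\n",
	       text_boundary(etext, sinittext, false));	/* 0x800000 */
	return 0;
}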
void mmu_mark_initmem_nx(void)
{
- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
- mmu_patch_addis(&patch__itlbmiss_linmem_top8,
- -((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
- if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) {
- unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
- unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
- unsigned long etext = __pa(_etext);
-
- mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
-
- /* Update page tables for PTDUMP and BDI */
- mmu_mapin_ram_chunk(0, einittext8, __pgprot(0));
- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
- mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_TEXT);
- mmu_mapin_ram_chunk(etext, einittext8, PAGE_KERNEL);
- } else {
- mmu_mapin_ram_chunk(0, etext8, PAGE_KERNEL_TEXT);
- mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
- }
- }
+ unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+ unsigned long sinittext = __pa(_sinittext);
+ unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
+ unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+ if (!debug_pagealloc_enabled_or_kfence())
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
+
+ mmu_pin_tlb(block_mapped_ram, false);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
unsigned long sinittext = __pa(_sinittext);
- unsigned long etext = __pa(_etext);
-
- if (CONFIG_DATA_SHIFT < 23)
- mmu_patch_addis(&patch__dtlbmiss_romem_top8,
- -__pa(((unsigned long)_sinittext) &
- ~(LARGE_PAGE_SIZE_8M - 1)));
- mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
-
- /* Update page tables for PTDUMP and BDI */
- mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
- mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
- mmu_mapin_ram_chunk(etext, sinittext, PAGE_KERNEL_RO);
+
+ mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
+ if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
+ mmu_pin_tlb(block_mapped_ram, true);
}
#endif
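With the instruction-patching approach gone, flipping rodata read-only is a plain remap: the range below _sinittext is walked again with new == false, so the existing PTEs are rewritten as PAGE_KERNEL_ROX rather than allocated, then the data TLBs are re-pinned under CONFIG_PIN_TLB_DATA. A toy model of such an in-place protection flip (slot granularity and names are illustrative):

#include <assert.h>
#include <stdio.h>

#define NSLOTS 8

static const char *prot[NSLOTS];	/* toy PTE protections, 512K each */

/* new == false path: slots must already exist, only prot changes. */
static void remap(int from, int to, const char *p)
{
	for (int i = from; i < to; i++) {
		assert(prot[i]);	/* never allocates, only updates */
		prot[i] = p;
	}
}

int main(void)
{
	for (int i = 0; i < NSLOTS; i++)
		prot[i] = i < 4 ? "KERNEL_TEXT" : "KERNEL";

	remap(0, 4, "KERNEL_ROX");	/* mmu_mark_rodata_ro() analogue */

	printf("slot 0 is now %s\n", prot[0]);
	return 0;
}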
@@ -214,67 +199,15 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
BUG_ON(first_memblock_base != 0);
/* 8xx can only access 32MB at the moment */
- memblock_set_current_limit(min_t(u64, first_memblock_size, 0x02000000));
+ memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}
-/*
- * Set up to use a given MMU context.
- * id is context number, pgd is PGD pointer.
- *
- * We place the physical address of the new task page directory loaded
- * into the MMU base register, and set the ASID compare register with
- * the new "context."
- */
-void set_context(unsigned long id, pgd_t *pgd)
+int pud_clear_huge(pud_t *pud)
{
- s16 offset = (s16)(__pa(swapper_pg_dir));
-
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is passed as second argument.
- */
- if (IS_ENABLED(CONFIG_BDI_SWITCH))
- abatron_pteptrs[1] = pgd;
-
- /* Register M_TWB will contain base address of level 1 table minus the
- * lower part of the kernel PGDIR base address, so that all accesses to
- * level 1 table are done relative to lower part of kernel PGDIR base
- * address.
- */
- mtspr(SPRN_M_TWB, __pa(pgd) - offset);
-
- /* Update context */
- mtspr(SPRN_M_CASID, id - 1);
- /* sync */
- mb();
-}
-
-void flush_instruction_cache(void)
-{
- isync();
- mtspr(SPRN_IC_CST, IDC_INVALL);
- isync();
+ return 0;
}
-#ifdef CONFIG_PPC_KUEP
-void __init setup_kuep(bool disabled)
+int pmd_clear_huge(pmd_t *pmd)
{
- if (disabled)
- return;
-
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-
- mtspr(SPRN_MI_AP, MI_APG_KUEP);
+ return 0;
}
-#endif
-
-#ifdef CONFIG_PPC_KUAP
-void __init setup_kuap(bool disabled)
-{
- pr_info("Activating Kernel Userspace Access Protection\n");
-
- if (disabled)
- pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");
-
- mtspr(SPRN_MD_AP, MD_APG_KUAP);
-}
-#endif