author     Christophe Leroy <christophe.leroy@c-s.fr>    2019-02-21 19:08:51 +0000
committer  Michael Ellerman <mpe@ellerman.id.au>         2019-02-23 21:04:32 +1100
commit     d5f17ee96447736a84bc44ffc4b0dddb1b519222 (patch)
tree       6cd30783636f3e54bdec42c13b3e95af096762c6 /arch/powerpc/mm
parent     0f4a9041c7a77240fa1ff927775620b574151f34 (diff)
powerpc/8xx: don't disable large TLBs with CONFIG_STRICT_KERNEL_RWX
This patch implements handling of STRICT_KERNEL_RWX with large TLBs directly
in the TLB miss handlers. To do so, etext and sinittext are aligned on 512kB
boundaries, and the miss handlers use 512kB pages instead of 8M pages for
addresses close to the boundaries. RO PP flags are set for addresses below
sinittext.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
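To make the boundary handling concrete, here is a minimal userspace sketch of
the page-size choice described above, assuming a kernel whose text crosses the
first 8M block; SZ_512K, the example etext value and pick_page_size() are
illustrative names, not part of this patch:

#include <stdio.h>
#include <stdint.h>

#define SZ_512K            0x00080000u
#define LARGE_PAGE_SIZE_8M 0x00800000u

/* Example only: _etext rounded up to a 512kB boundary, as the patch requires. */
static const uint32_t etext_512k = 0x00c80000u;

/* Page size a miss handler would pick for effective address ea. */
static uint32_t pick_page_size(uint32_t ea)
{
    /*
     * Within the 8M block that contains the end of text, fall back to
     * 512kB pages so the RO/RW split can land on a 512kB boundary;
     * everywhere else keep the usual 8M pages.
     */
    uint32_t block_8m = etext_512k & ~(LARGE_PAGE_SIZE_8M - 1);

    if (ea >= block_8m && ea < block_8m + LARGE_PAGE_SIZE_8M)
        return SZ_512K;
    return LARGE_PAGE_SIZE_8M;
}

int main(void)
{
    printf("0x%08x -> 0x%x\n", 0x00400000u, pick_page_size(0x00400000u)); /* 8M   */
    printf("0x%08x -> 0x%x\n", 0x00c40000u, pick_page_size(0x00c40000u)); /* 512k */
    return 0;
}

The real decision is made in the ITLB/DTLB miss handlers, using constants
installed at the patch sites referenced in the hunks below (the handler side
lives in the head_8xx.S part of this series).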
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/8xx_mmu.c    31
-rw-r--r--  arch/powerpc/mm/init_32.c     2
-rw-r--r--  arch/powerpc/mm/mmu_decl.h    2
3 files changed, 32 insertions, 3 deletions
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index ce11cbaa25d8..fe1f6443d57f 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -94,11 +94,20 @@ static void __init mmu_mapin_immr(void)
map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
}
-static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
+static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{
modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}
+static void mmu_patch_addis(s32 *site, long simm)
+{
+ unsigned int instr = *(unsigned int *)patch_site_addr(site);
+
+ instr &= 0xffff0000;
+ instr |= ((unsigned long)simm) >> 16;
+ patch_instruction_site(site, instr);
+}
+
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
unsigned long mapped;
@@ -135,6 +144,26 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return mapped;
}
+void mmu_mark_initmem_nx(void)
+{
+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
+ mmu_patch_addis(&patch__itlbmiss_linmem_top8,
+ -((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
+ if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
+ mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
+}
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mmu_mark_rodata_ro(void)
+{
+ if (CONFIG_DATA_SHIFT < 23)
+ mmu_patch_addis(&patch__dtlbmiss_romem_top8,
+ -__pa(((unsigned long)_sinittext) &
+ ~(LARGE_PAGE_SIZE_8M - 1)));
+ mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
+}
+#endif
+
void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
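For reference, a hedged userspace illustration of what the new mmu_patch_addis()
does: it keeps the opcode and register fields of the addis at the patch site and
replaces its 16-bit immediate with the upper half of simm. The example encoding
(addis r11, r10, 0) and the example etext value are assumptions, not taken from
the actual patch sites:

#include <stdint.h>
#include <stdio.h>

/* Keep the arithmetic 32-bit, as it is on the 32-bit 8xx. */
static uint32_t patch_addis_immediate(uint32_t instr, int32_t simm)
{
    instr &= 0xffff0000u;            /* keep the opcode, RT and RA fields */
    instr |= (uint32_t)simm >> 16;   /* install the new upper-16-bit immediate */
    return instr;
}

int main(void)
{
    uint32_t instr = 0x3d6a0000u;    /* addis r11, r10, 0 (placeholder encoding) */

    /* Example: -(_etext rounded down to 8M), with an assumed _etext of 0x00c80000. */
    int32_t simm = -(int32_t)(0x00c80000u & ~(0x00800000u - 1));

    printf("patched instruction: 0x%08x\n", patch_addis_immediate(instr, simm));
    return 0;
}

The *_linmem_top / *_romem_top names suggest the patched (negative) constants
serve as boundary comparisons in the miss handlers; the exact use is in the
head_8xx.S side of the series, not in this file.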
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index bc28995a37ea..41a3513cadc9 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -108,7 +108,7 @@ static void __init MMU_setup(void)
__map_without_bats = 1;
__map_without_ltlbs = 1;
}
- if (strict_kernel_rwx_enabled())
+ if (strict_kernel_rwx_enabled() && !IS_ENABLED(CONFIG_PPC_8xx))
__map_without_ltlbs = 1;
}
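A small sketch of the policy this hunk leaves in place, considering only the
condition shown here (other reasons for setting __map_without_ltlbs are
ignored); map_without_ltlbs() is an illustrative stand-in, not a kernel helper:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors only this hunk: strict kernel RWX forces page-sized mappings
 * everywhere except the 8xx, whose miss handlers now honour the RO/NX
 * boundaries directly. */
static bool map_without_ltlbs(bool strict_rwx, bool is_8xx)
{
    return strict_rwx && !is_8xx;
}

int main(void)
{
    printf("strict RWX, 8xx:      large TLBs kept    = %d\n", !map_without_ltlbs(true, true));
    printf("strict RWX, book3s32: large TLBs dropped = %d\n",  map_without_ltlbs(true, false));
    return 0;
}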
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 98fc94affc29..74ff61dabcb1 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -166,7 +166,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; }
static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
#endif
-#if defined(CONFIG_PPC_BOOK3S_32)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
void mmu_mark_initmem_nx(void);
void mmu_mark_rodata_ro(void);
#else
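The hunk ends at the #else, so the fallback branch is not shown. For context,
a sketch of the pattern being extended, with the stub bodies below assumed
rather than taken from the diff:

/* Platforms that implement the hooks get real prototypes; everyone else
 * gets empty inline stubs so callers need no #ifdef. */
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
void mmu_mark_initmem_nx(void);
void mmu_mark_rodata_ro(void);
#else
static inline void mmu_mark_initmem_nx(void) { }
static inline void mmu_mark_rodata_ro(void) { }
#endif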