Diffstat (limited to 'arch/powerpc/include/asm/book3s/32/pgtable.h')
-rw-r--r-- arch/powerpc/include/asm/book3s/32/pgtable.h | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 224912432821..36443cda8dcf 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -184,17 +184,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#define VMALLOC_OFFSET (0x1000000) /* 16M */
-/*
- * With CONFIG_STRICT_KERNEL_RWX, kernel segments are set NX. But when modules
- * are used, NX cannot be set on VMALLOC space. So vmalloc VM space and linear
- * memory shall not share segments.
- */
-#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES)
-#define VMALLOC_START ((ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \
- ~(VMALLOC_OFFSET - 1))
-#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#endif
#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
@@ -202,6 +192,11 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#define VMALLOC_END ioremap_bot
#endif
+#ifdef CONFIG_STRICT_KERNEL_RWX
+#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
+#define MODULES_VADDR (MODULES_END - SZ_256M)
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
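
As a rough illustration of the newly added macros (not part of the commit itself), the sketch below assumes the common book3s/32 PAGE_OFFSET of 0xc0000000 and reimplements ALIGN_DOWN and SZ_256M locally so it builds in user space. With that assumption, MODULES_END aligns down to PAGE_OFFSET itself and MODULES_VADDR ends up one 256M segment below it.

#include <stdio.h>

/*
 * Illustrative sketch only; PAGE_OFFSET = 0xc0000000 is an assumed value,
 * and ALIGN_DOWN/SZ_256M are local stand-ins for the kernel macros.
 */
#define SZ_256M			0x10000000UL
#define PAGE_OFFSET		0xc0000000UL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))	/* power-of-two 'a' only */

#define MODULES_END		ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR		(MODULES_END - SZ_256M)

int main(void)
{
	printf("MODULES_VADDR = 0x%08lx\n", MODULES_VADDR);	/* 0xb0000000 */
	printf("MODULES_END   = 0x%08lx\n", MODULES_END);	/* 0xc0000000 */
	return 0;
}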