Diffstat (limited to 'arch/mips/mm/init.c')
-rw-r--r--  arch/mips/mm/init.c  450
1 file changed, 283 insertions(+), 167 deletions(-)
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 4e73f10a7519..a673d3d68254 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -10,7 +10,7 @@
*/
#include <linux/bug.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
@@ -22,7 +22,7 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
@@ -30,41 +30,21 @@
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
+#include <linux/initrd.h>
+#include <linux/execmem.h>
-#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
-#include <asm/kmap_types.h>
+#include <asm/maar.h>
#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
-/* Atomicity and interruptability */
-#ifdef CONFIG_MIPS_MT_SMTC
-
-#include <asm/mipsmtregs.h>
-
-#define ENTER_CRITICAL(flags) \
- { \
- unsigned int mvpflags; \
- local_irq_save(flags);\
- mvpflags = dvpe()
-#define EXIT_CRITICAL(flags) \
- evpe(mvpflags); \
- local_irq_restore(flags); \
- }
-#else
-
-#define ENTER_CRITICAL(flags) local_irq_save(flags)
-#define EXIT_CRITICAL(flags) local_irq_restore(flags)
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
/*
* We have up to 8 empty zeroed pages so we can map one of the right colour
* when needed. This is necessary only on R4000 / R4400 SC and MC versions
@@ -74,114 +54,98 @@
*/
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
+EXPORT_SYMBOL(zero_page_mask);
/*
* Not static inline because used by IP27 special magic initialization code
*/
-void setup_zero_pages(void)
+static void __init setup_zero_pages(void)
{
- unsigned int order, i;
- struct page *page;
+ unsigned int order;
if (cpu_has_vce)
order = 3;
else
order = 0;
- empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!empty_zero_page)
- panic("Oh boy, that early out of memory?");
-
- page = virt_to_page((void *)empty_zero_page);
- split_page(page, order);
- for (i = 0; i < (1 << order); i++, page++)
- mark_page_reserved(page);
+ empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE);
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
-#ifdef CONFIG_MIPS_MT_SMTC
-static pte_t *kmap_coherent_pte;
-static void __init kmap_coherent_init(void)
-{
- unsigned long vaddr;
-
- /* cache the first coherent kmap pte */
- vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
- kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
-}
-#else
-static inline void kmap_coherent_init(void) {}
-#endif
-
-void *kmap_coherent(struct page *page, unsigned long addr)
+static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
enum fixed_addresses idx;
+ unsigned int old_mmid;
unsigned long vaddr, flags, entrylo;
unsigned long old_ctx;
pte_t pte;
int tlbidx;
- BUG_ON(Page_dcache_dirty(page));
+ BUG_ON(folio_test_dcache_dirty(page_folio(page)));
- inc_preempt_count();
+ preempt_disable();
+ pagefault_disable();
idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
-#ifdef CONFIG_MIPS_MT_SMTC
- idx += FIX_N_COLOURS * smp_processor_id() +
- (in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
-#else
idx += in_interrupt() ? FIX_N_COLOURS : 0;
-#endif
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
- pte = mk_pte(page, PAGE_KERNEL);
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+ pte = mk_pte(page, prot);
+#if defined(CONFIG_XPA)
+ entrylo = pte_to_entrylo(pte.pte_high);
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
entrylo = pte.pte_high;
#else
entrylo = pte_to_entrylo(pte_val(pte));
#endif
- ENTER_CRITICAL(flags);
+ local_irq_save(flags);
old_ctx = read_c0_entryhi();
write_c0_entryhi(vaddr & (PAGE_MASK << 1));
write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo);
-#ifdef CONFIG_MIPS_MT_SMTC
- set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
- /* preload TLB instead of local_flush_tlb_one() */
- mtc0_tlbw_hazard();
- tlb_probe();
- tlb_probe_hazard();
- tlbidx = read_c0_index();
- mtc0_tlbw_hazard();
- if (tlbidx < 0)
- tlb_write_random();
- else
- tlb_write_indexed();
-#else
- tlbidx = read_c0_wired();
+ if (cpu_has_mmid) {
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(MMID_KERNEL_WIRED);
+ }
+#ifdef CONFIG_XPA
+ if (cpu_has_xpa) {
+ entrylo = (pte.pte_low & _PFNX_MASK);
+ writex_c0_entrylo0(entrylo);
+ writex_c0_entrylo1(entrylo);
+ }
+#endif
+ tlbidx = num_wired_entries();
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
mtc0_tlbw_hazard();
tlb_write_indexed();
-#endif
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
- EXIT_CRITICAL(flags);
+ if (cpu_has_mmid)
+ write_c0_memorymapid(old_mmid);
+ local_irq_restore(flags);
return (void*) vaddr;
}
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+void *kmap_coherent(struct page *page, unsigned long addr)
+{
+ return __kmap_pgprot(page, addr, PAGE_KERNEL);
+}
+
+void *kmap_noncoherent(struct page *page, unsigned long addr)
+{
+ return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
+}
void kunmap_coherent(void)
{
-#ifndef CONFIG_MIPS_MT_SMTC
unsigned int wired;
unsigned long flags, old_ctx;
- ENTER_CRITICAL(flags);
+ local_irq_save(flags);
old_ctx = read_c0_entryhi();
- wired = read_c0_wired() - 1;
+ wired = num_wired_entries() - 1;
write_c0_wired(wired);
write_c0_index(wired);
write_c0_entryhi(UNIQUE_ENTRYHI(wired));
@@ -191,20 +155,20 @@ void kunmap_coherent(void)
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
- EXIT_CRITICAL(flags);
-#endif
- dec_preempt_count();
- preempt_check_resched();
+ local_irq_restore(flags);
+ pagefault_enable();
+ preempt_enable();
}
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
+ struct folio *src = page_folio(from);
void *vfrom, *vto;
vto = kmap_atomic(to);
if (cpu_has_dc_aliases &&
- page_mapped(from) && !Page_dcache_dirty(from)) {
+ folio_mapped(src) && !folio_test_dcache_dirty(src)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent();
@@ -225,17 +189,19 @@ void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
+ struct folio *folio = page_folio(page);
+
if (cpu_has_dc_aliases &&
- page_mapped(page) && !Page_dcache_dirty(page)) {
+ folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len);
kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
- SetPageDcacheDirty(page);
+ folio_set_dcache_dirty(folio);
}
- if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
+ if (vma->vm_flags & VM_EXEC)
flush_cache_page(vma, vaddr, page_to_pfn(page));
}
@@ -243,22 +209,25 @@ void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
+ struct folio *folio = page_folio(page);
+
if (cpu_has_dc_aliases &&
- page_mapped(page) && !Page_dcache_dirty(page)) {
+ folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
- SetPageDcacheDirty(page);
+ folio_set_dcache_dirty(folio);
}
}
+EXPORT_SYMBOL_GPL(copy_from_user_page);
void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_HIGHMEM
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
@@ -267,9 +236,9 @@ void __init fixrange_init(unsigned long start, unsigned long end,
unsigned long vaddr;
vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pud_offset(vaddr);
- k = __pmd_offset(vaddr);
+ i = pgd_index(vaddr);
+ j = pud_index(vaddr);
+ k = pmd_index(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
@@ -278,7 +247,13 @@ void __init fixrange_init(unsigned long start, unsigned long end,
pmd = (pmd_t *)pud;
for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
+ PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE,
+ PAGE_SIZE);
+
set_pmd(pmd, __pmd((unsigned long)pte));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
@@ -291,46 +266,140 @@ void __init fixrange_init(unsigned long start, unsigned long end,
#endif
}
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-int page_is_ram(unsigned long pagenr)
+struct maar_walk_info {
+ struct maar_config cfg[16];
+ unsigned int num_cfg;
+};
+
+static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
+ void *data)
+{
+ struct maar_walk_info *wi = data;
+ struct maar_config *cfg = &wi->cfg[wi->num_cfg];
+ unsigned int maar_align;
+
+ /* MAAR registers hold physical addresses right shifted by 4 bits */
+ maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);
+
+ /* Fill in the MAAR config entry */
+ cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
+ cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
+ cfg->attrs = MIPS_MAAR_S;
+
+ /* Ensure we don't overflow the cfg array */
+ if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
+ wi->num_cfg++;
+
+ return 0;
+}
+
+
+unsigned __weak platform_maar_init(unsigned num_pairs)
{
- int i;
+ unsigned int num_configured;
+ struct maar_walk_info wi;
- for (i = 0; i < boot_mem_map.nr_map; i++) {
- unsigned long addr, end;
+ wi.num_cfg = 0;
+ walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);
- switch (boot_mem_map.map[i].type) {
- case BOOT_MEM_RAM:
- case BOOT_MEM_INIT_RAM:
- break;
- default:
- /* not usable memory */
+ num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
+ if (num_configured < wi.num_cfg)
+ pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
+ num_pairs, wi.num_cfg);
+
+ return num_configured;
+}
+
+void maar_init(void)
+{
+ unsigned num_maars, used, i;
+ phys_addr_t lower, upper, attr;
+ static struct {
+ struct maar_config cfgs[3];
+ unsigned used;
+ } recorded = { { { 0 } }, 0 };
+
+ if (!cpu_has_maar)
+ return;
+
+ /* Detect the number of MAARs */
+ write_c0_maari(~0);
+ back_to_back_c0_hazard();
+ num_maars = read_c0_maari() + 1;
+
+ /* MAARs should be in pairs */
+ WARN_ON(num_maars % 2);
+
+ /* Set MAARs using values we recorded already */
+ if (recorded.used) {
+ used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
+ BUG_ON(used != recorded.used);
+ } else {
+ /* Configure the required MAARs */
+ used = platform_maar_init(num_maars / 2);
+ }
+
+ /* Disable any further MAARs */
+ for (i = (used * 2); i < num_maars; i++) {
+ write_c0_maari(i);
+ back_to_back_c0_hazard();
+ write_c0_maar(0);
+ back_to_back_c0_hazard();
+ }
+
+ if (recorded.used)
+ return;
+
+ pr_info("MAAR configuration:\n");
+ for (i = 0; i < num_maars; i += 2) {
+ write_c0_maari(i);
+ back_to_back_c0_hazard();
+ upper = read_c0_maar();
+#ifdef CONFIG_XPA
+ upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
+#endif
+
+ write_c0_maari(i + 1);
+ back_to_back_c0_hazard();
+ lower = read_c0_maar();
+#ifdef CONFIG_XPA
+ lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
+#endif
+
+ attr = lower & upper;
+ lower = (lower & MIPS_MAAR_ADDR) << 4;
+ upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
+
+ pr_info(" [%d]: ", i / 2);
+ if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
+ pr_cont("disabled\n");
continue;
}
- addr = PFN_UP(boot_mem_map.map[i].addr);
- end = PFN_DOWN(boot_mem_map.map[i].addr +
- boot_mem_map.map[i].size);
+ pr_cont("%pa-%pa", &lower, &upper);
- if (pagenr >= addr && pagenr < end)
- return 1;
- }
+ if (attr & MIPS_MAAR_S)
+ pr_cont(" speculate");
- return 0;
+ pr_cont("\n");
+
+ /* Record the setup for use on secondary CPUs */
+ if (used <= ARRAY_SIZE(recorded.cfgs)) {
+ recorded.cfgs[recorded.used].lower = lower;
+ recorded.cfgs[recorded.used].upper = upper;
+ recorded.cfgs[recorded.used].attrs = attr;
+ recorded.used++;
+ }
+ }
}
+#ifndef CONFIG_NUMA
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long lastpfn __maybe_unused;
pagetable_init();
-#ifdef CONFIG_HIGHMEM
- kmap_init();
-#endif
- kmap_coherent_init();
-
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
@@ -338,59 +407,34 @@ void __init paging_init(void)
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
- lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
- lastpfn = highend_pfn;
if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
printk(KERN_WARNING "This processor doesn't support highmem."
" %ldk highmem ignored\n",
(highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
- lastpfn = max_low_pfn;
}
#endif
- free_area_init_nodes(max_zone_pfns);
+ free_area_init(max_zone_pfns);
}
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
-static inline void mem_init_free_highmem(void)
+void __init arch_mm_preinit(void)
{
-#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
- for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
- struct page *page = pfn_to_page(tmp);
-
- if (!page_is_ram(tmp))
- SetPageReserved(page);
- else
- free_highmem_page(page);
- }
-#endif
-}
-
-void __init mem_init(void)
-{
-#ifdef CONFIG_HIGHMEM
-#ifdef CONFIG_DISCONTIGMEM
-#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
-#endif
- max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
-#else
- max_mapnr = max_low_pfn;
-#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+ /*
+ * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
+ * bits to hold a full 32b physical address on MIPS32 systems.
+ */
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
- free_all_bootmem();
+ maar_init();
setup_zero_pages(); /* Setup zeroed pages. */
- mem_init_free_highmem();
- mem_init_print_info(NULL);
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
@@ -400,7 +444,12 @@ void __init mem_init(void)
0x80000000 - 4, KCORE_TEXT);
#endif
}
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#else /* CONFIG_NUMA */
+void __init arch_mm_preinit(void)
+{
+ setup_zero_pages(); /* This comes from node 0 */
+}
+#endif /* !CONFIG_NUMA */
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
@@ -416,37 +465,104 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
+void (*free_init_pages_eva)(void *begin, void *end) = NULL;
+
+void __weak __init prom_free_prom_memory(void)
{
- free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
- "initrd");
+ /* nothing to do */
}
-#endif
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
{
prom_free_prom_memory();
- free_initmem_default(POISON_FREE_INITMEM);
+ /*
+ * Let the platform define a specific function to free the
+ * init section since EVA may have used any possible mapping
+ * between virtual and physical addresses.
+ */
+ if (free_init_pages_eva)
+ free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
+ else
+ free_initmem_default(POISON_FREE_INITMEM);
+}
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+ return node_distance(cpu_to_node(from), cpu_to_node(to));
+}
+
+static int __init pcpu_cpu_to_node(int cpu)
+{
+ return cpu_to_node(cpu);
}
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long delta;
+ unsigned int cpu;
+ int rc;
+
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+ pcpu_cpu_distance,
+ pcpu_cpu_to_node);
+ if (rc < 0)
+ panic("Failed to initialize percpu areas.");
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif
/*
- * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
- * are constants. So we use the variants from asm-offset.h until that gcc
- * will officially be retired.
- *
* Align swapper_pg_dir in to 64K, allows its address to be loaded
* with a single LUI instruction in the TLB handlers. If we used
* __aligned(64K), its size would get rounded up to the alignment
* size, and waste space. So we place it in its own section and align
* it in the linker script.
*/
-pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
+#ifndef __PAGETABLE_PUD_FOLDED
+pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pte_table);
+
+#ifdef CONFIG_EXECMEM
+#ifdef MODULES_VADDR
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif
+#endif /* CONFIG_EXECMEM */
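
Usage notes (illustrative sketches, not part of the commit):

A platform may override the __weak platform_maar_init() above to describe
its own speculation-safe regions. The bounds below are made-up example
values; maar_config(), struct maar_config and MIPS_MAAR_S come from
<asm/maar.h> as used in this file:

	#include <linux/kernel.h>	/* ARRAY_SIZE */
	#include <asm/maar.h>

	unsigned platform_maar_init(unsigned num_pairs)
	{
		/* Example only: mark the first 256 MiB as speculation-safe. */
		struct maar_config cfg[] = {
			{
				.lower = 0x00000000,	/* inclusive, 64K-aligned */
				.upper = 0x0fffffff,	/* inclusive, ends on 0xffff */
				.attrs = MIPS_MAAR_S,	/* permit speculative accesses */
			},
		};

		/* Returns how many entries fit in num_pairs MAAR pairs. */
		return maar_config(cfg, ARRAY_SIZE(cfg), num_pairs);
	}

Accesses through kmap_coherent() must be bracketed by kunmap_coherent(),
exactly as the copy_{to,from}_user_page() helpers above do; a minimal
read-side pairing, valid only while the page is mapped and not
dcache-dirty:

	void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(dst, vfrom, len);
	kunmap_coherent();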