Diffstat (limited to 'arch/arm/mm/init.c')
-rw-r--r--  arch/arm/mm/init.c  508
1 file changed, 112 insertions, 396 deletions
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 478ea8b7db87..54bdca025c9f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/init.c
*
* Copyright (C) 1995-2005 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -21,17 +18,20 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
+#include <linux/swiotlb.h>
+#include <linux/execmem.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
@@ -93,18 +93,6 @@ EXPORT_SYMBOL(arm_dma_zone_size);
*/
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
-
-static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
- unsigned long dma_size)
-{
- if (size[0] <= dma_size)
- return;
-
- size[ZONE_NORMAL] = size[0] - dma_size;
- size[ZONE_DMA] = dma_size;
- hole[ZONE_NORMAL] = hole[0];
- hole[ZONE_DMA] = 0;
-}
#endif
void __init setup_dma_zone(const struct machine_desc *mdesc)
@@ -122,79 +110,40 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
- unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- struct memblock_region *reg;
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
- /*
- * initialise the zones.
- */
- memset(zone_size, 0, sizeof(zone_size));
-
- /*
- * The memory size has already been determined. If we need
- * to do anything fancy with the allocation of this memory
- * to the zones, now is the time to do it.
- */
- zone_size[0] = max_low - min;
-#ifdef CONFIG_HIGHMEM
- zone_size[ZONE_HIGHMEM] = max_high - max_low;
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
-
- /*
- * Calculate the size of the holes.
- * holes = node_size - sum(bank_sizes)
- */
- memcpy(zhole_size, zone_size, sizeof(zhole_size));
- for_each_memblock(memory, reg) {
- unsigned long start = memblock_region_memory_base_pfn(reg);
- unsigned long end = memblock_region_memory_end_pfn(reg);
-
- if (start < max_low) {
- unsigned long low_end = min(end, max_low);
- zhole_size[0] -= low_end - start;
- }
+ max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
- if (end > max_low) {
- unsigned long high_start = max(start, max_low);
- zhole_size[ZONE_HIGHMEM] -= end - high_start;
- }
-#endif
- }
-
-#ifdef CONFIG_ZONE_DMA
- /*
- * Adjust the sizes according to any special requirements for
- * this machine type.
- */
- if (arm_dma_zone_size)
- arm_adjust_dma_zone(zone_size, zhole_size,
- arm_dma_zone_size >> PAGE_SHIFT);
+ max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
-
- free_area_init_node(0, zone_size, min, zhole_size);
+ free_area_init(max_zone_pfn);
}
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
- return memblock_is_map_memory(__pfn_to_phys(pfn));
-}
-EXPORT_SYMBOL(pfn_valid);
-#endif
+ phys_addr_t addr = __pfn_to_phys(pfn);
+ unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;
-#ifndef CONFIG_SPARSEMEM
-static void __init arm_memory_present(void)
-{
-}
-#else
-static void __init arm_memory_present(void)
-{
- struct memblock_region *reg;
+ if (__phys_to_pfn(addr) != pfn)
+ return 0;
- for_each_memblock(memory, reg)
- memory_present(0, memblock_region_memory_base_pfn(reg),
- memblock_region_memory_end_pfn(reg));
+ /*
+ * If the address is less than pageblock_size bytes away from a present
+ * memory chunk, there will still be a memory map entry for it because
+ * we round the freed memory map to the pageblock boundaries.
+ */
+ if (memblock_overlaps_region(&memblock.memory,
+ ALIGN_DOWN(addr, pageblock_size),
+ pageblock_size))
+ return 1;
+
+ return 0;
}
+EXPORT_SYMBOL(pfn_valid);
#endif
static bool arm_memblock_steal_permitted = true;
@@ -205,60 +154,39 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
BUG_ON(!arm_memblock_steal_permitted);
- phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
- memblock_free(phys, size);
+ phys = memblock_phys_alloc(size, align);
+ if (!phys)
+ panic("Failed to steal %pa bytes at %pS\n",
+ &size, (void *)_RET_IP_);
+
+ memblock_phys_free(phys, size);
memblock_remove(phys, size);
return phys;
}
-static void __init arm_initrd_init(void)
+#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
+void check_cpu_icache_size(int cpuid)
{
-#ifdef CONFIG_BLK_DEV_INITRD
- phys_addr_t start;
- unsigned long size;
-
- initrd_start = initrd_end = 0;
-
- if (!phys_initrd_size)
- return;
-
- /*
- * Round the memory region to page boundaries as per free_initrd_mem()
- * This allows us to detect whether the pages overlapping the initrd
- * are in use, but more importantly, reserves the entire set of pages
- * as we don't want these pages allocated for other purposes.
- */
- start = round_down(phys_initrd_start, PAGE_SIZE);
- size = phys_initrd_size + (phys_initrd_start - start);
- size = round_up(size, PAGE_SIZE);
+ u32 size, ctr;
- if (!memblock_is_region_memory(start, size)) {
- pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
- (u64)start, size);
- return;
- }
+ asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
- if (memblock_is_region_reserved(start, size)) {
- pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
- (u64)start, size);
- return;
- }
-
- memblock_reserve(start, size);
-
- /* Now convert initrd to virtual addresses */
- initrd_start = __phys_to_virt(phys_initrd_start);
- initrd_end = initrd_start + phys_initrd_size;
-#endif
+ size = 1 << ((ctr & 0xf) + 2);
+ if (cpuid != 0 && icache_size != size)
+ pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
+ cpuid);
+ if (icache_size > size)
+ icache_size = size;
}
+#endif
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
/* Register the kernel text, kernel data and initrd with memblock. */
memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);
- arm_initrd_init();
+ reserve_initrd_mem();
arm_mm_memblock_reserve();
@@ -266,7 +194,6 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
- early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
/* reserve memory for DMA contiguous allocations */
@@ -278,42 +205,25 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
void __init bootmem_init(void)
{
- unsigned long min, max_low, max_high;
-
memblock_allow_resize();
- max_low = max_high = 0;
-
- find_limits(&min, &max_low, &max_high);
- early_memtest((phys_addr_t)min << PAGE_SHIFT,
- (phys_addr_t)max_low << PAGE_SHIFT);
+ find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);
- /*
- * Sparsemem tries to allocate bootmem in memory_present(),
- * so must be done after the fixed reservations
- */
- arm_memory_present();
+ early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
+ (phys_addr_t)max_low_pfn << PAGE_SHIFT);
/*
- * sparse_init() needs the bootmem allocator up and running.
+ * sparse_init() tries to allocate memory from memblock, so must be
+ * done after the fixed reservations
*/
sparse_init();
/*
- * Now free the memory - free_area_init_node needs
+ * Now free the memory - free_area_init needs
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
- zone_sizes_init(min, max_low, max_high);
-
- /*
- * This doesn't seem to be used by the Linux memory manager any
- * more, but is used by ll_rw_block. If we can get rid of it, we
- * also get rid of some of the stuff above as well.
- */
- min_low_pfn = min;
- max_low_pfn = max_low;
- max_pfn = max_high;
+ zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}
/*
@@ -327,221 +237,16 @@ static inline void poison_init_mem(void *s, size_t count)
*p++ = 0xe7fddef0;
}
-static inline void
-free_memmap(unsigned long start_pfn, unsigned long end_pfn)
-{
- struct page *start_pg, *end_pg;
- phys_addr_t pg, pgend;
-
- /*
- * Convert start_pfn/end_pfn to a struct page pointer.
- */
- start_pg = pfn_to_page(start_pfn - 1) + 1;
- end_pg = pfn_to_page(end_pfn - 1) + 1;
-
- /*
- * Convert to physical addresses, and
- * round start upwards and end downwards.
- */
- pg = PAGE_ALIGN(__pa(start_pg));
- pgend = __pa(end_pg) & PAGE_MASK;
-
- /*
- * If there are free pages between these,
- * free the section of the memmap array.
- */
- if (pg < pgend)
- memblock_free_early(pg, pgend - pg);
-}
-
-/*
- * The mem_map array can get very big. Free the unused area of the memory map.
- */
-static void __init free_unused_memmap(void)
-{
- unsigned long start, prev_end = 0;
- struct memblock_region *reg;
-
- /*
- * This relies on each bank being in address order.
- * The banks are sorted previously in bootmem_init().
- */
- for_each_memblock(memory, reg) {
- start = memblock_region_memory_base_pfn(reg);
-
-#ifdef CONFIG_SPARSEMEM
- /*
- * Take care not to free memmap entries that don't exist
- * due to SPARSEMEM sections which aren't present.
- */
- start = min(start,
- ALIGN(prev_end, PAGES_PER_SECTION));
-#else
- /*
- * Align down here since the VM subsystem insists that the
- * memmap entries are valid from the bank start aligned to
- * MAX_ORDER_NR_PAGES.
- */
- start = round_down(start, MAX_ORDER_NR_PAGES);
-#endif
- /*
- * If we had a previous bank, and there is a space
- * between the current bank and the previous, free it.
- */
- if (prev_end && prev_end < start)
- free_memmap(prev_end, start);
-
- /*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
- */
- prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
- MAX_ORDER_NR_PAGES);
- }
-
-#ifdef CONFIG_SPARSEMEM
- if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
- free_memmap(prev_end,
- ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
-}
-
-#ifdef CONFIG_HIGHMEM
-static inline void free_area_high(unsigned long pfn, unsigned long end)
-{
- for (; pfn < end; pfn++)
- free_highmem_page(pfn_to_page(pfn));
-}
-#endif
-
-static void __init free_highpages(void)
-{
-#ifdef CONFIG_HIGHMEM
- unsigned long max_low = max_low_pfn;
- struct memblock_region *mem, *res;
-
- /* set highmem page free */
- for_each_memblock(memory, mem) {
- unsigned long start = memblock_region_memory_base_pfn(mem);
- unsigned long end = memblock_region_memory_end_pfn(mem);
-
- /* Ignore complete lowmem entries */
- if (end <= max_low)
- continue;
-
- if (memblock_is_nomap(mem))
- continue;
-
- /* Truncate partial highmem entries */
- if (start < max_low)
- start = max_low;
-
- /* Find and exclude any reserved regions */
- for_each_memblock(reserved, res) {
- unsigned long res_start, res_end;
-
- res_start = memblock_region_reserved_base_pfn(res);
- res_end = memblock_region_reserved_end_pfn(res);
-
- if (res_end < start)
- continue;
- if (res_start < start)
- res_start = start;
- if (res_start > end)
- res_start = end;
- if (res_end > end)
- res_end = end;
- if (res_start != start)
- free_area_high(start, res_start);
- start = res_end;
- if (start == end)
- break;
- }
-
- /* And now free anything which remains */
- if (start < end)
- free_area_high(start, end);
- }
-#endif
-}
-
-/*
- * mem_init() marks the free areas in the mem_map and tells us how much
- * memory is free. This is done after various parts of the system have
- * claimed their memory after the kernel image.
- */
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
-#ifdef CONFIG_HAVE_TCM
- /* These pointers are filled in on TCM detection */
- extern u32 dtcm_end;
- extern u32 itcm_end;
+#ifdef CONFIG_ARM_LPAE
+ swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE);
#endif
- set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
-
- /* this will put all unused low memory onto the freelists */
- free_unused_memmap();
- memblock_free_all();
-
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
- free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
-#endif
-
- free_highpages();
-
- mem_init_print_info(NULL);
-
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
-
- pr_notice("Virtual kernel memory layout:\n"
- " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#ifdef CONFIG_HAVE_TCM
- " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
+ memblock_phys_free(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
- " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#ifdef CONFIG_HIGHMEM
- " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
-#ifdef CONFIG_MODULES
- " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
- " .text : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .init : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .data : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
-
- MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
-#ifdef CONFIG_HAVE_TCM
- MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
- MLK(ITCM_OFFSET, (unsigned long) itcm_end),
-#endif
- MLK(FIXADDR_START, FIXADDR_END),
- MLM(VMALLOC_START, VMALLOC_END),
- MLM(PAGE_OFFSET, (unsigned long)high_memory),
-#ifdef CONFIG_HIGHMEM
- MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
- (PAGE_SIZE)),
-#endif
-#ifdef CONFIG_MODULES
- MLM(MODULES_VADDR, MODULES_END),
-#endif
-
- MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(__init_begin, __init_end),
- MLK_ROUNDUP(_sdata, _edata),
- MLK_ROUNDUP(__bss_start, __bss_stop));
-
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
/*
* Check boundaries twice: Some fundamental inconsistencies can
@@ -625,7 +330,7 @@ static inline void section_update(unsigned long addr, pmdval_t mask,
{
pmd_t *pmd;
- pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+ pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
#ifdef CONFIG_ARM_LPAE
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
@@ -648,8 +353,8 @@ static inline bool arch_has_strict_perms(void)
return !!(get_cr() & CR_XP);
}
-void set_section_perms(struct section_perm *perms, int n, bool set,
- struct mm_struct *mm)
+static void set_section_perms(struct section_perm *perms, int n, bool set,
+ struct mm_struct *mm)
{
size_t i;
unsigned long addr;
@@ -675,7 +380,7 @@ void set_section_perms(struct section_perm *perms, int n, bool set,
}
-/**
+/*
* update_sections_early intended to be called only through stop_machine
* framework and executed by only one CPU while all other CPUs will spin and
* wait, so no locking is required in this function.
@@ -688,7 +393,8 @@ static void update_sections_early(struct section_perm perms[], int n)
if (t->flags & PF_KTHREAD)
continue;
for_each_thread(t, s)
- set_section_perms(perms, n, true, s->mm);
+ if (s->mm)
+ set_section_perms(perms, n, true, s->mm);
}
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);
@@ -711,31 +417,10 @@ static int __mark_rodata_ro(void *unused)
return 0;
}
-static int kernel_set_to_readonly __read_mostly;
-
void mark_rodata_ro(void)
{
- kernel_set_to_readonly = 1;
stop_machine(__mark_rodata_ro, NULL, NULL);
- debug_checkwx();
-}
-
-void set_kernel_text_rw(void)
-{
- if (!kernel_set_to_readonly)
- return;
-
- set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
- current->active_mm);
-}
-
-void set_kernel_text_ro(void)
-{
- if (!kernel_set_to_readonly)
- return;
-
- set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
- current->active_mm);
+ arm_debug_checkwx();
}
#else
@@ -752,27 +437,58 @@ void free_initmem(void)
}
#ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd;
-
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd) {
- if (start == initrd_start)
- start = round_down(start, PAGE_SIZE);
- if (end == initrd_end)
- end = round_up(end, PAGE_SIZE);
-
- poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
- }
+ if (start == initrd_start)
+ start = round_down(start, PAGE_SIZE);
+ if (end == initrd_end)
+ end = round_up(end, PAGE_SIZE);
+
+ poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
+#endif
+
+#ifdef CONFIG_EXECMEM
-static int __init keepinitrd_setup(char *__unused)
+#ifdef CONFIG_XIP_KERNEL
+/*
+ * The XIP kernel text is mapped in the module area for modules and
+ * some other stuff to work without any indirect relocations.
+ * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
+ * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
+ */
+#undef MODULES_VADDR
+#define MODULES_VADDR (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
+#endif
+
+#ifdef CONFIG_MMU
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
{
- keep_initrd = 1;
- return 1;
+ unsigned long fallback_start = 0, fallback_end = 0;
+
+ if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS)) {
+ fallback_start = VMALLOC_START;
+ fallback_end = VMALLOC_END;
+ }
+
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL_EXEC,
+ .alignment = 1,
+ .fallback_start = fallback_start,
+ .fallback_end = fallback_end,
+ },
+ },
+ };
+
+ return &execmem_info;
}
+#endif /* CONFIG_MMU */
-__setup("keepinitrd", keepinitrd_setup);
-#endif
+#endif /* CONFIG_EXECMEM */