Diffstat (limited to 'arch/riscv/mm/init.c')
-rw-r--r--  arch/riscv/mm/init.c  108
1 file changed, 27 insertions(+), 81 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index f9f9568d689e..9dfb8b2dffd6 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -21,6 +21,7 @@
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
+#include <asm/numa.h>
#include "../kernel/head.h"
@@ -105,55 +106,6 @@ void __init mem_init(void)
print_vm_layout();
}
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init setup_initrd(void)
-{
- phys_addr_t start;
- unsigned long size;
-
- /* Ignore the virtual address computed during device tree parsing */
- initrd_start = initrd_end = 0;
-
- if (!phys_initrd_size)
- return;
- /*
- * Round the memory region to page boundaries as per free_initrd_mem()
- * This allows us to detect whether the pages overlapping the initrd
- * are in use, but more importantly, reserves the entire set of pages
- * as we don't want these pages allocated for other purposes.
- */
- start = round_down(phys_initrd_start, PAGE_SIZE);
- size = phys_initrd_size + (phys_initrd_start - start);
- size = round_up(size, PAGE_SIZE);
-
- if (!memblock_is_region_memory(start, size)) {
- pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
- (u64)start, size);
- goto disable;
- }
-
- if (memblock_is_region_reserved(start, size)) {
- pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
- (u64)start, size);
- goto disable;
- }
-
- memblock_reserve(start, size);
- /* Now convert initrd to virtual addresses */
- initrd_start = (unsigned long)__va(phys_initrd_start);
- initrd_end = initrd_start + phys_initrd_size;
- initrd_below_start_ok = 1;
-
- pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
- (void *)(initrd_start), size);
- return;
-disable:
- pr_cont(" - disabling initrd\n");
- initrd_start = 0;
- initrd_end = 0;
-}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
void __init setup_bootmem(void)
{
phys_addr_t mem_start = 0;
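The removed setup_initrd() is superseded by the generic reserve_initrd_mem() helper; its page-rounding arithmetic is the part worth keeping in mind. A minimal standalone sketch of that rounding (not from the patch; the round_down()/round_up() re-implementations and the initrd address/size are illustrative assumptions):

/* Standalone illustration of the page rounding the removed setup_initrd()
 * performed; round_down()/round_up() are redefined here for the demo and
 * the initrd address/size are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define round_down(x, y) ((x) & ~((y) - 1))
#define round_up(x, y)   round_down((x) + (y) - 1, (y))

int main(void)
{
	uint64_t phys_initrd_start = 0x80200100UL; /* hypothetical */
	uint64_t phys_initrd_size  = 0x300UL;      /* hypothetical */

	/* Widen the region to whole pages so every page touching the
	 * initrd is reserved, mirroring free_initrd_mem()'s expectations. */
	uint64_t start = round_down(phys_initrd_start, PAGE_SIZE);
	uint64_t size  = round_up(phys_initrd_size +
				  (phys_initrd_start - start), PAGE_SIZE);

	/* Prints: reserve [0x80200000, 0x80201000) */
	printf("reserve [%#llx, %#llx)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size));
	return 0;
}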
@@ -198,20 +150,19 @@ void __init setup_bootmem(void)
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
-#ifdef CONFIG_BLK_DEV_INITRD
- setup_initrd();
-#endif /* CONFIG_BLK_DEV_INITRD */
-
+ reserve_initrd_mem();
/*
- * Avoid using early_init_fdt_reserve_self() since __pa() does
+ * If DTB is built in, no need to reserve its memblock.
+ * Otherwise, do reserve it but avoid using
+ * early_init_fdt_reserve_self() since __pa() does
* not work for DTB pointers that are fixmap addresses
*/
- memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+ if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
+ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
early_init_fdt_scan_reserved_mem();
dma_contiguous_reserve(dma32_phys_limit);
memblock_allow_resize();
- memblock_dump_all();
}
#ifdef CONFIG_MMU
@@ -226,8 +177,6 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
-#define MAX_EARLY_MAPPING_SIZE SZ_128M
-
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
@@ -302,13 +251,7 @@ static void __init create_pte_mapping(pte_t *ptep,
pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
-
-#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
-#define NUM_EARLY_PMDS 1UL
-#else
-#define NUM_EARLY_PMDS (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
-#endif
-pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
+pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
@@ -330,11 +273,9 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa)
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
- uintptr_t pmd_num;
+ BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);
- pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
- BUG_ON(pmd_num >= NUM_EARLY_PMDS);
- return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
+ return (uintptr_t)early_pmd;
}
static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
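With MAX_EARLY_MAPPING_SIZE and NUM_EARLY_PMDS gone, a single statically allocated early PMD table must cover the whole early mapping, and the new BUG_ON encodes exactly that: (va - PAGE_OFFSET) >> PGDIR_SHIFT must be zero, i.e. va must lie within the first PGDIR_SIZE window above PAGE_OFFSET. A hedged standalone check of the invariant (Sv39 constants assumed; not from the patch):

/* Demo of the invariant behind the new BUG_ON in alloc_pmd_early():
 * one early PMD table suffices iff the VA falls within the first
 * PGDIR_SIZE (1 GiB on Sv39) above PAGE_OFFSET.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET 0xffffffe000000000UL /* Sv39 default, assumption */
#define PGDIR_SHIFT 30                   /* 1 GiB PGD entries on Sv39 */

static int fits_single_early_pmd(uint64_t va)
{
	return ((va - PAGE_OFFSET) >> PGDIR_SHIFT) == 0;
}

int main(void)
{
	/* In range: 256 MiB above PAGE_OFFSET -> prints 1 */
	printf("%d\n", fits_single_early_pmd(PAGE_OFFSET + 0x10000000UL));
	/* Out of range: exactly 1 GiB above PAGE_OFFSET -> prints 0 */
	printf("%d\n", fits_single_early_pmd(PAGE_OFFSET + (1UL << 30)));
	return 0;
}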
@@ -452,7 +393,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
uintptr_t va, pa, end_va;
uintptr_t load_pa = (uintptr_t)(&_start);
uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
- uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
+ uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
@@ -464,12 +405,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
* Enforce boot alignment requirements of RV32 and
* RV64 by only allowing PMD or PGD mappings.
*/
- BUG_ON(map_size == PAGE_SIZE);
+ map_size = PMD_SIZE;
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
BUG_ON((load_pa % map_size) != 0);
- BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
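setup_vm() no longer computes a best_map_size() bounded by MAX_EARLY_MAPPING_SIZE; the early kernel mapping is pinned to PMD-sized (2 MiB) pages and only alignment is sanity-checked. A small sketch of those checks (Sv39 constants and the load address are assumptions):

/* Sketch of the alignment sanity checks kept in setup_vm() once the
 * early map size is pinned to PMD_SIZE. All constants are assumptions
 * (Sv39 layout, typical RV64 load address).
 */
#include <assert.h>
#include <stdint.h>

#define PGDIR_SIZE  (1UL << 30)          /* 1 GiB PGD granule, Sv39 */
#define PMD_SIZE    (1UL << 21)          /* 2 MiB PMD granule */
#define PAGE_OFFSET 0xffffffe000000000UL /* Sv39 default, assumption */

int main(void)
{
	uintptr_t load_pa  = 0x80200000UL;  /* hypothetical load address */
	uintptr_t map_size = PMD_SIZE;      /* fixed: PMD mappings only */

	assert((PAGE_OFFSET % PGDIR_SIZE) == 0); /* PGD-aligned VA base */
	assert((load_pa % map_size) == 0);       /* PMD-aligned kernel  */
	return 0;
}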
@@ -511,6 +451,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
/* Setup early PMD for DTB */
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
(uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
+#ifndef CONFIG_BUILTIN_DTB
/* Create two consecutive PMD mappings for FDT early scan */
pa = dtb_pa & ~(PMD_SIZE - 1);
create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
@@ -518,7 +459,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+#else /* CONFIG_BUILTIN_DTB */
+ dtb_early_va = __va(dtb_pa);
+#endif /* CONFIG_BUILTIN_DTB */
#else
+#ifndef CONFIG_BUILTIN_DTB
/* Create two consecutive PGD mappings for FDT early scan */
pa = dtb_pa & ~(PGDIR_SIZE - 1);
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
@@ -526,6 +471,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
+#else /* CONFIG_BUILTIN_DTB */
+ dtb_early_va = __va(dtb_pa);
+#endif /* CONFIG_BUILTIN_DTB */
#endif
dtb_early_pa = dtb_pa;
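In the non-builtin case the DTB is reached through two consecutive PMD (or PGD, where PMDs are folded) mappings at DTB_EARLY_BASE_VA: the physical address is split into an aligned base, which gets mapped, plus an in-window offset, which is added back to form dtb_early_va. A standalone demo of that split (all addresses and constants are hypothetical):

/* Demo of the aligned-base/offset split used to map the DTB early:
 * mapping two consecutive PMD windows keeps the whole blob visible
 * even if it crosses the first 2 MiB boundary. All values are
 * hypothetical (Sv39-style 2 MiB PMDs assumed).
 */
#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE          (1UL << 21)          /* 2 MiB, assumption */
#define DTB_EARLY_BASE_VA 0xffffffff00000000UL /* hypothetical VA */

int main(void)
{
	uint64_t dtb_pa = 0x82200840UL;             /* hypothetical */
	uint64_t base   = dtb_pa & ~(PMD_SIZE - 1); /* PMD-aligned base */
	uint64_t va     = DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));

	printf("map pa %#llx (+ next PMD) at %#llx; dtb_early_va = %#llx\n",
	       (unsigned long long)base,
	       (unsigned long long)DTB_EARLY_BASE_VA,
	       (unsigned long long)va);
	return 0;
}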
@@ -616,15 +564,7 @@ static void __init setup_vm_final(void)
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
-#ifdef CONFIG_BUILTIN_DTB
- dtb_early_va = soc_lookup_builtin_dtb();
- if (!dtb_early_va) {
- /* Fallback to first available DTS */
- dtb_early_va = (void *) __dtb_start;
- }
-#else
dtb_early_va = (void *)dtb_pa;
-#endif
dtb_early_pa = dtb_pa;
}
@@ -665,9 +605,15 @@ void mark_rodata_ro(void)
void __init paging_init(void)
{
setup_vm_final();
- sparse_init();
setup_zero_page();
+}
+
+void __init misc_mem_init(void)
+{
+ arch_numa_init();
+ sparse_init();
zone_sizes_init();
+ memblock_dump_all();
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP