From 7d3332be011e4ed061c1403b30b5e54ebccb4fa2 Mon Sep 17 00:00:00 2001
From: Björn Töpel
Date: Wed, 31 May 2023 11:38:17 +0200
Subject: riscv: mm: Pre-allocate PGD entries for vmalloc/modules area
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The RISC-V port requires that kernel PGD entries be synchronized
between MMs. This is done via the vmalloc_fault() function, which
simply copies the PGD entries from init_mm to the faulting one.

Historically, faulting in PGD entries has been a source of both bugs
[1] and poor performance.

One way to get rid of vmalloc faults is to pre-allocate the PGD
entries. Pre-allocating the entries potentially wastes 64 * 4K (65 on
SV39). The pre-allocation function is pulled from Jörg Rödel's x86
work, with the addition of 3-level page tables (PMD allocations). The
pmd_alloc() function needs the ptlock cache to be initialized (when
split page table locks are enabled), so the pre-allocation is done in
a RISC-V specific pgtable_cache_init() implementation.

Pre-allocate the kernel PGD entries for the vmalloc/modules area, but
only for 64-bit platforms.

Link: https://lore.kernel.org/lkml/20200508144043.13893-1-joro@8bytes.org/ # [1]
Signed-off-by: Björn Töpel
Reviewed-by: Alexandre Ghiti
Link: https://lore.kernel.org/r/20230531093817.665799-1-bjorn@kernel.org
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/mm/init.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

(limited to 'arch/riscv/mm/init.c')

diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 747e5b1ef02d..45ceaff5679e 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1363,3 +1363,61 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return vmemmap_populate_basepages(start, end, node, NULL);
 }
 #endif
+
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+/*
+ * Pre-allocates page-table pages for a specific area in the kernel
+ * page-table. Only the level which needs to be synchronized between
+ * all page-tables is allocated because the synchronization can be
+ * expensive.
+ */
+static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end,
+					       const char *area)
+{
+	unsigned long addr;
+	const char *lvl;
+
+	for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+		pgd_t *pgd = pgd_offset_k(addr);
+		p4d_t *p4d;
+		pud_t *pud;
+		pmd_t *pmd;
+
+		lvl = "p4d";
+		p4d = p4d_alloc(&init_mm, pgd, addr);
+		if (!p4d)
+			goto failed;
+
+		if (pgtable_l5_enabled)
+			continue;
+
+		lvl = "pud";
+		pud = pud_alloc(&init_mm, p4d, addr);
+		if (!pud)
+			goto failed;
+
+		if (pgtable_l4_enabled)
+			continue;
+
+		lvl = "pmd";
+		pmd = pmd_alloc(&init_mm, pud, addr);
+		if (!pmd)
+			goto failed;
+	}
+	return;
+
+failed:
+	/*
+	 * The pages have to be there now or they will be missing in
+	 * process page-tables later.
+	 */
+	panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
+}
+
+void __init pgtable_cache_init(void)
+{
+	preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
+	if (IS_ENABLED(CONFIG_MODULES))
+		preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
+}
+#endif
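
As a rough illustration of the "64 * 4K (65 on SV39)" estimate in the
commit message, the following userspace sketch steps through a range
the same way the pre-allocation loop does and counts the PGD slots it
touches. The Sv39 constant (PGDIR_SHIFT = 30, so one PGD entry maps
1 GiB) and the 64 GiB example range are assumptions made for
illustration; this is not part of the patch:

/*
 * Userspace sketch, not kernel code: counts how many PGD slots the
 * pre-allocation loop above would visit for a [start, end) range,
 * using the same stepping expression. Assumes an LP64 host (64-bit
 * unsigned long) and Sv39-style constants.
 */
#include <stdio.h>

#define PGDIR_SHIFT	30			/* Sv39: one PGD entry maps 1 GiB */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned long pgd_slots(unsigned long start, unsigned long end)
{
	unsigned long addr, n = 0;

	/*
	 * Same loop shape as preallocate_pgd_pages_range(); the
	 * "addr >= start" test guards against wrap-around when the
	 * range ends at the top of the address space.
	 */
	for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE))
		n++;

	return n;
}

int main(void)
{
	unsigned long size = 64UL << 30;	/* 64 GiB example range */

	/* A PGDIR_SIZE-aligned 64 GiB range covers exactly 64 slots... */
	printf("aligned:   %lu\n", pgd_slots(0, size));
	/* ...while shifting it by 512 MiB makes it straddle 65. */
	printf("unaligned: %lu\n", pgd_slots(512UL << 20, (512UL << 20) + size));

	return 0;
}

Built with a plain cc invocation, this prints 64 for the aligned range
and 65 for the unaligned one: one page-table page per visited slot,
plus one extra when the area straddles an additional PGD boundary.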