author		Ard Biesheuvel <ardb@kernel.org>	2024-02-14 13:29:22 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2024-02-16 12:42:41 +0000
commit		0dd4f60a2c76938c2625f6c630c225699d97608b (patch)
tree		fce658dd0e32ba152364007bbbb32497c5f0afcd /arch/arm64/mm/mmu.c
parent		0383808e4d99ac31892655ae9dc93597eb6f1412 (diff)
arm64: mm: Add support for folding PUDs at runtime
In order to support LPA2 on 16k pages in a way that permits non-LPA2
systems to run the same kernel image, we have to be able to fall back to
at most 48 bits of virtual addressing.

Falling back to 48 bits would result in a level 0 with only 2 entries,
which is suboptimal in terms of TLB utilization. So instead, let's fall
back to 47 bits in that case. This means we need to be able to fold PUDs
dynamically, similar to how we fold P4Ds for 48 bit virtual addressing
on LPA2 with 4k pages.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20240214122845.2033971-81-ardb+git@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
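The arithmetic behind the 47-bit choice can be checked directly: with a
16k granule the page offset is 14 bits and each table level resolves 11
bits (2048 eight-byte entries per page). The standalone sketch below is
illustrative only (not kernel code; the helper name is made up) and
reproduces why a 48-bit fallback leaves a 2-entry level 0 while 47 bits
uses exactly three full levels, allowing the PUD level to be folded:

#include <stdio.h>

/* 16k granule: 14-bit page offset, 11 translation bits per level. */
#define PAGE_SHIFT	14
#define BITS_PER_LEVEL	(PAGE_SHIFT - 3)

static void show_levels(unsigned int va_bits)
{
	unsigned int translated = va_bits - PAGE_SHIFT;
	/* Number of levels needed to translate the remaining bits. */
	unsigned int levels = (translated + BITS_PER_LEVEL - 1) / BITS_PER_LEVEL;
	/* Bits left over for the top level after the lower levels. */
	unsigned int top_bits = translated - (levels - 1) * BITS_PER_LEVEL;

	printf("%u-bit VA: %u levels, top level has %u entries\n",
	       va_bits, levels, 1u << top_bits);
}

int main(void)
{
	show_levels(52);	/* LPA2: 4 levels, 32-entry level 0 */
	show_levels(48);	/* naive fallback: 4 levels, 2-entry level 0 */
	show_levels(47);	/* chosen fallback: 3 full levels, PUD folded */
	return 0;
}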
Diffstat (limited to 'arch/arm64/mm/mmu.c')
-rw-r--r--	arch/arm64/mm/mmu.c	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 8e5b3a7c5afd..b131ed31a6c8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1065,7 +1065,7 @@ static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
 		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
 	} while (addr = next, addr < end);
 
-	if (CONFIG_PGTABLE_LEVELS <= 3)
+	if (!pgtable_l4_enabled())
		return;
 
	if (!pgtable_range_aligned(start, end, floor, ceiling, P4D_MASK))
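The change replaces a compile-time constant check with a runtime query,
so a kernel built with four page table levels can skip PUD teardown when
the hardware forced a fall back to three levels. For context, one
plausible shape for the pgtable_l4_enabled() helper introduced earlier
in this series is sketched below; this is an illustrative reconstruction
rather than the exact kernel implementation (vabits_actual and VA_BITS
are the kernel's names for the runtime and compile-time VA sizes):

/* Sketch only: reports whether the PUD level really exists at runtime. */
static inline bool pgtable_l4_enabled(void)
{
	/* With more than 4 configured levels, or without LPA2, the
	 * question is settled at build time. */
	if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
		return CONFIG_PGTABLE_LEVELS > 3;

	/* Otherwise the PUD level is only in use if the hardware gave
	 * us the full 52-bit virtual address space; on non-LPA2
	 * hardware we fell back to 47 bits and folded it. */
	return vabits_actual == VA_BITS;
}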