author     Steve Capper <steve.capper@arm.com>    2019-08-07 16:55:22 +0100
committer  Will Deacon <will@kernel.org>          2019-08-09 11:17:26 +0100
commit     b6d00d47e81a49f6cf462518c10408f37a3e6785
tree       f84df6281b176ab0f3c2fea47d8b9ef233f15b6a /arch/arm64/mm
parent     ce3aaed87344c83c77135f80e7b76e1da9c92ee6
arm64: mm: Introduce 52-bit Kernel VAs
Most of the machinery is now in place to enable 52-bit kernel VAs that are
detectable at boot time.

This patch adds a Kconfig option for 52-bit user and kernel addresses and
plumbs in the requisite CONFIG_ macros as well as sets TCR.T1SZ,
physvirt_offset and vmemmap at early boot.

To simplify things this patch also removes the 52-bit user/48-bit kernel
kconfig option.

Signed-off-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/init.c  10
-rw-r--r--  arch/arm64/mm/proc.S   3
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 2940221e5519..531c497c5758 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -326,6 +326,16 @@ void __init arm64_memblock_init(void)
 	vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
 
+	/*
+	 * If we are running with a 52-bit kernel VA config on a system that
+	 * does not support it, we have to offset our vmemmap and physvirt_offset
+	 * s.t. we avoid the 52-bit portion of the direct linear map
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
+		vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
+		physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
+	}
+
 	/*
 	 * Remove the memory that we will not be able to cover with the
 	 * linear mapping. Take care not to clip the kernel which may be
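
The adjustment added above works because _PAGE_OFFSET(va) in asm/memory.h expands
to -(1UL << (va)) at this point in the series, so the difference between the
48-bit and 52-bit page offsets is exactly the span of linear map that a
48-bit-only CPU cannot reach. A minimal standalone C sketch (not part of the
patch; it assumes an LP64 host and 64K pages, i.e. PAGE_SHIFT == 16) that prints
the offsets applied to vmemmap and the linear map:

/* Standalone illustration only -- not kernel code. */
#include <stdio.h>

#define PAGE_SHIFT	16			/* 64K pages (assumption) */
#define _PAGE_OFFSET(va)	(-(1UL << (va)))	/* as in asm/memory.h */

int main(void)
{
	/* Span between the 52-bit and 48-bit starts of the linear map. */
	unsigned long delta = _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	printf("linear map delta : %#lx bytes\n", delta);
	printf("vmemmap advance  : %#lx struct pages\n", delta >> PAGE_SHIFT);
	return 0;
}

Dividing the byte delta by the page size gives the number of struct page slots
that vmemmap is advanced by, matching the >> PAGE_SHIFT in the hunk above.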
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 8d289ff7584d..8b021c5c0884 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -438,10 +438,11 @@ ENTRY(__cpu_setup)
 			TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
 	tcr_clear_errata_bits x10, x9, x5
 
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#ifdef CONFIG_ARM64_VA_BITS_52
 	ldr_l		x9, vabits_user
 	sub		x9, xzr, x9
 	add		x9, x9, #64
+	tcr_set_t1sz	x10, x9
 #else
 	ldr_l		x9, idmap_t0sz
 #endif
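
In the hunk above, the sub/add pair leaves 64 - vabits_user in x9, and the newly
added tcr_set_t1sz invocation programs that value into TCR_EL1.T1SZ so that
TTBR1 (kernel) translations also use the boot-detected VA size. A hedged C
sketch of the same computation (not part of the patch; the helper name and the
assumption that T1SZ occupies TCR_EL1 bits [21:16] per the Arm ARM are only for
illustration):

/* Standalone illustration only -- not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define TCR_T1SZ_SHIFT	16			/* T1SZ field position (assumption) */
#define TCR_T1SZ_MASK	(UINT64_C(0x3f) << TCR_T1SZ_SHIFT)

/* Program TCR_EL1.T1SZ for a given number of kernel VA bits: T1SZ = 64 - VA bits. */
static uint64_t tcr_set_t1sz(uint64_t tcr, unsigned int va_bits)
{
	uint64_t t1sz = 64 - va_bits;		/* 12 for 52-bit VAs, 16 for 48-bit */

	return (tcr & ~TCR_T1SZ_MASK) | (t1sz << TCR_T1SZ_SHIFT);
}

int main(void)
{
	uint64_t tcr = tcr_set_t1sz(0, 52);

	printf("TCR_EL1.T1SZ for 52-bit kernel VAs: %llu\n",
	       (unsigned long long)((tcr & TCR_T1SZ_MASK) >> TCR_T1SZ_SHIFT));
	return 0;
}

On a CPU that supports 52-bit VAs this yields T1SZ = 12; on a 48-bit-only part
the boot-time fallback ends up with the usual T1SZ = 16.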