author		Ard Biesheuvel <ardb@kernel.org>	2023-01-11 11:22:34 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2023-01-24 11:51:07 +0000
commit		32b135a7fafebe7843abe5425159fa081ae56b7c (patch)
tree		e6c314d442ae05ffa247128f977704dab32638d2
parent		9d7c13e5dde31270eb48a34204a2e06b1a719546 (diff)
arm64: head: avoid cache invalidation when entering with the MMU on
If we enter with the MMU on, there is no need for explicit cache
invalidation for stores to memory, as they will be coherent with the
caches. Let's take advantage of this, and create the ID map with the
MMU still enabled if that is how we entered, and avoid any cache
invalidation calls in that case.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20230111102236.1430401-5-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--	arch/arm64/kernel/head.S	5
1 file changed, 3 insertions(+), 2 deletions(-)
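The hunks below test x19 to decide whether the cache maintenance can be skipped; x19 is populated earlier by record_mmu_state, which primary_entry calls first. As a minimal, hypothetical sketch (not part of this patch), such a flag could be derived by sampling the M (MMU enable) bit of the active SCTLR, so that x19 is non-zero exactly when the CPU entered with the MMU on; the CurrentEL_EL2 and SCTLR_ELx_M constants are assumed to come from the usual asm/ptrace.h and asm/sysreg.h definitions:

	mrs	x19, CurrentEL
	cmp	x19, #CurrentEL_EL2
	mrs	x19, sctlr_el1			// default: booted at EL1
	b.ne	1f
	mrs	x19, sctlr_el2			// booted at EL2 instead
1:	and	x19, x19, #SCTLR_ELx_M		// x19 != 0  <=>  MMU was on at entry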
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c3b898efd3b5..d75f41920645 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -89,9 +89,9 @@
 SYM_CODE_START(primary_entry)
 	bl	record_mmu_state
 	bl	preserve_boot_args
+	bl	create_idmap
 	bl	init_kernel_el			// w0=cpu_boot_mode
 	mov	x20, x0
-	bl	create_idmap
 
 	/*
 	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -377,12 +377,13 @@ SYM_FUNC_START_LOCAL(create_idmap)
 	 * accesses (MMU disabled), invalidate those tables again to
 	 * remove any speculatively loaded cache lines.
 	 */
+	cbnz	x19, 0f				// skip cache invalidation if MMU is on
 	dmb	sy
 
 	adrp	x0, init_idmap_pg_dir
 	adrp	x1, init_idmap_pg_end
 	bl	dcache_inval_poc
-	ret	x28
+0:	ret	x28
 SYM_FUNC_END(create_idmap)
 
 SYM_FUNC_START_LOCAL(create_kernel_mapping)
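For readability, this is roughly how the tail of create_idmap reads once the patch is applied, reconstructed from the '+' and context lines of the second hunk above (the comment block preceding the cbnz is omitted here):

	cbnz	x19, 0f				// skip cache invalidation if MMU is on
	dmb	sy

	adrp	x0, init_idmap_pg_dir
	adrp	x1, init_idmap_pg_end
	bl	dcache_inval_poc
0:	ret	x28
SYM_FUNC_END(create_idmap)

When the MMU was already on at entry, the branch to the local label 0 bypasses the dmb and the dcache_inval_poc call entirely, since the page-table stores are already coherent with the caches; the MMU-off path is unchanged.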