From afdfd93a53aea68837b34da81d442358ff7552f3 Mon Sep 17 00:00:00 2001
From: Peter Collingbourne <pcc@google.com>
Date: Tue, 13 Jul 2021 18:36:38 -0700
Subject: arm64: mte: optimize GCR_EL1 modification on kernel entry/exit

Accessing GCR_EL1 and issuing an ISB can be expensive on some
microarchitectures. Although we must write to GCR_EL1, we can
restructure the code to avoid reading from it because the new value
can be derived entirely from the exclusion mask, which is already in
a GPR. Do so.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/I560a190a74176ca4cc5191dad08f77f6b1577c75
Link: https://lore.kernel.org/r/20210714013638.3995315-1-pcc@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/kernel/entry.S | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

(limited to 'arch/arm64/kernel/entry.S')

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 4b779f198486..8c8581e86a1a 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -168,15 +168,11 @@ alternative_else_nop_endif
 #endif
 	.endm
 
-	.macro mte_set_gcr, tmp, tmp2
+	.macro mte_set_gcr, mte_ctrl, tmp
 #ifdef CONFIG_ARM64_MTE
-	/*
-	 * Calculate and set the exclude mask preserving
-	 * the RRND (bit[16]) setting.
-	 */
-	mrs_s	\tmp2, SYS_GCR_EL1
-	bfxil	\tmp2, \tmp, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
-	msr_s	SYS_GCR_EL1, \tmp2
+	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
+	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
+	msr_s	SYS_GCR_EL1, \tmp
 #endif
 	.endm
 
--
cgit
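
For readers who want the before/after register math spelled out, below is a
minimal user-space C sketch (not kernel code) that models GCR_EL1 as a plain
64-bit variable. The helper names (set_gcr_old, set_gcr_new) and the constant
GCR_EL1_EXCL_MASK are illustrative assumptions, and MTE_CTRL_GCR_USER_EXCL_SHIFT
is taken to be 0 purely for the example; only the bit positions (exclude mask in
bits [15:0], RRND at bit 16) come from the patch itself.

#include <stdint.h>
#include <stdio.h>

/*
 * User-space model only, not kernel code: GCR_EL1 is treated as a plain
 * 64-bit variable.  The user exclude mask lives in GCR_EL1[15:0] and RRND
 * is bit 16, as described in the removed comment; the shift value 0 is an
 * assumption made for this example.
 */
#define MTE_CTRL_GCR_USER_EXCL_SHIFT	0
#define GCR_EL1_EXCL_MASK		0xffffULL
#define SYS_GCR_EL1_RRND		(1ULL << 16)

/* Old sequence (mrs_s + bfxil + msr_s): needs the current GCR_EL1 value. */
static uint64_t set_gcr_old(uint64_t gcr_el1, uint64_t mte_ctrl)
{
	uint64_t excl = (mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) & GCR_EL1_EXCL_MASK;

	return (gcr_el1 & ~GCR_EL1_EXCL_MASK) | excl;	/* bfxil */
}

/* New sequence (ubfx + orr + msr_s): derived from mte_ctrl alone. */
static uint64_t set_gcr_new(uint64_t mte_ctrl)
{
	uint64_t excl = (mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) & GCR_EL1_EXCL_MASK;	/* ubfx */

	return excl | SYS_GCR_EL1_RRND;			/* orr */
}

int main(void)
{
	uint64_t mte_ctrl = 0xfffe;		/* example exclusion mask */
	uint64_t gcr = SYS_GCR_EL1_RRND;	/* RRND already enabled */

	printf("old: %#llx\n", (unsigned long long)set_gcr_old(gcr, mte_ctrl));
	printf("new: %#llx\n", (unsigned long long)set_gcr_new(mte_ctrl));
	return 0;
}

Provided RRND is already set in GCR_EL1 when the old sequence runs (my
understanding of how the kernel configures MTE, not something this patch
states), both helpers produce the same value; the difference is that the new
sequence never reads the register, which is the expensive access the commit
message is eliminating.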