author     Mark Rutland <mark.rutland@arm.com>    2020-10-26 13:49:31 +0000
committer  Marc Zyngier <maz@kernel.org>          2020-10-30 08:53:10 +0000
commit     d86de40decaa14e6613af1b2783bf4d589d0f38b (patch)
tree       b2403210b1625e1a829a5c43249a2a4f61a409ba /arch/arm64/include/asm/cpufeature.h
parent     dfc4e3f08903ed8fe0b66cc25b64524a82654166 (diff)
arm64: cpufeature: upgrade hyp caps to final
We finalize caps before initializing kvm hyp code, and any use of
cpus_have_const_cap() in kvm hyp code generates redundant and potentially
unsound code to read the cpu_hwcaps array.

A number of helper functions used in both hyp context and regular kernel
context use cpus_have_const_cap(), as some regular kernel code runs before
the capabilities are finalized. It's tedious and error-prone to write
separate copies of these for hyp and non-hyp code.

So that we can avoid the redundant code, let's automatically upgrade
cpus_have_const_cap() to cpus_have_final_cap() when used in hyp context.
With this change, there's never a reason to access the cpu_hwcaps array
from hyp code, and we don't need to create an NVHE alias for this.

This should have no effect on non-hyp code.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: David Brazdil <dbrazdil@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201026134931.28246-4-mark.rutland@arm.com
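As an illustrative sketch (not part of the patch; the helper name and the
ARM64_HAS_PAN capability are only examples), a helper shared between hyp and
regular kernel context keeps a single definition and lets the header pick the
right path:

static __always_inline bool example_uses_pan(void)
{
	/*
	 * In hyp objects (__KVM_VHE_HYPERVISOR__ or __KVM_NVHE_HYPERVISOR__
	 * defined) this now compiles to cpus_have_final_cap(); in regular
	 * kernel objects it keeps the runtime fallback needed before the
	 * capabilities are finalized.
	 */
	return cpus_have_const_cap(ARM64_HAS_PAN);
}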
Diffstat (limited to 'arch/arm64/include/asm/cpufeature.h')
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 26
1 file changed, 24 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9f671aa0419b..79d6a0371c78 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -375,6 +375,23 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
 	return false;
 }
 
+static __always_inline bool is_vhe_hyp_code(void)
+{
+	/* Only defined for code run in VHE hyp context */
+	return __is_defined(__KVM_VHE_HYPERVISOR__);
+}
+
+static __always_inline bool is_nvhe_hyp_code(void)
+{
+	/* Only defined for code run in NVHE hyp context */
+	return __is_defined(__KVM_NVHE_HYPERVISOR__);
+}
+
+static __always_inline bool is_hyp_code(void)
+{
+	return is_vhe_hyp_code() || is_nvhe_hyp_code();
+}
+
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;
@@ -444,8 +461,11 @@ static __always_inline bool cpus_have_final_cap(int num)
 }
 
 /*
- * Test for a capability, possibly with a runtime check.
+ * Test for a capability, possibly with a runtime check for non-hyp code.
  *
+ * For hyp code, this behaves the same as cpus_have_final_cap().
+ *
+ * For non-hyp code:
  * Before capabilities are finalized, this behaves as cpus_have_cap().
  * After capabilities are finalized, this is patched to avoid a runtime check.
  *
@@ -453,7 +473,9 @@ static __always_inline bool cpus_have_final_cap(int num)
  */
 static __always_inline bool cpus_have_const_cap(int num)
 {
-	if (system_capabilities_finalized())
+	if (is_hyp_code())
+		return cpus_have_final_cap(num);
+	else if (system_capabilities_finalized())
 		return __cpus_have_const_cap(num);
 	else
 		return cpus_have_cap(num);
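
For illustration only (hypothetical helper names; a sketch of the resulting
semantics, not code from the patch), after constant folding of is_hyp_code()
the two contexts reduce to:

/* In hyp objects, where is_hyp_code() is constant-true: */
static __always_inline bool const_cap_in_hyp(int num)
{
	return cpus_have_final_cap(num);	/* no cpu_hwcaps array read */
}

/* In regular kernel objects, where is_hyp_code() is constant-false: */
static __always_inline bool const_cap_in_kernel(int num)
{
	if (system_capabilities_finalized())
		return __cpus_have_const_cap(num);	/* patched, no runtime check */
	else
		return cpus_have_cap(num);		/* runtime bitmap test */
}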