author    Will Deacon <will@kernel.org>          2020-11-13 11:38:45 +0000
committer Marc Zyngier <maz@kernel.org>          2020-11-16 10:43:06 +0000
commit    c4792b6dbc5070fe67f4cdcfdad39416333acbe0 (patch)
tree      5a7721a792a59da36ae0568f88c4125b644e7617 /arch/arm64/kvm
parent    b881cdce77b48bd488f268041f32951bab89bb0f (diff)
arm64: spectre: Rename ARM64_HARDEN_EL2_VECTORS to ARM64_SPECTRE_V3A
Since ARM64_HARDEN_EL2_VECTORS is really a mitigation for Spectre-v3a,
rename it accordingly for consistency with the v2 and v4 mitigations.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20201113113847.21619-9-will@kernel.org
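For context, the renamed capability feeds the hyp vector-slot choice described in the comment updated in the arm.c hunk below: the slot is picked from the combination of the Spectre-v2 cpucap (ARM64_SPECTRE_V2 in contemporary kernels) and the newly named ARM64_SPECTRE_V3A. The standalone C sketch below summarises that decision; it is not kernel code. HYP_VECTOR_SPECTRE_DIRECT and the cpus_have_const_cap(ARM64_SPECTRE_V3A) check appear in this diff, while the other slot names and the boolean parameters are illustrative stand-ins for the real cpucap checks.

/*
 * Minimal sketch (not kernel code) of the hyp vector-slot selection
 * described in the cpu_hyp_reset() comment in this patch.
 * HYP_VECTOR_SPECTRE_DIRECT comes from the diff; the other slot names
 * and the boolean parameters are assumptions for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

enum hyp_vector_slot {
	HYP_VECTOR_DIRECT,		/* no mitigation needed */
	HYP_VECTOR_SPECTRE_DIRECT,	/* Spectre-v2 only: hardened vectors */
	HYP_VECTOR_INDIRECT,		/* Spectre-v3A only: empty slot by the idmap page */
	HYP_VECTOR_SPECTRE_INDIRECT,	/* both: hardened slot by the idmap page */
};

static enum hyp_vector_slot pick_hyp_vector(bool spectre_v2, bool spectre_v3a)
{
	if (spectre_v2 && spectre_v3a)
		return HYP_VECTOR_SPECTRE_INDIRECT;
	if (spectre_v2)
		return HYP_VECTOR_SPECTRE_DIRECT;
	if (spectre_v3a)
		return HYP_VECTOR_INDIRECT;
	return HYP_VECTOR_DIRECT;
}

int main(void)
{
	printf("v2 only  -> slot %d\n", pick_hyp_vector(true, false));
	printf("v3a only -> slot %d\n", pick_hyp_vector(false, true));
	printf("v2 + v3a -> slot %d\n", pick_hyp_vector(true, true));
	return 0;
}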
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/arm.c            | 8
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S  | 3
-rw-r--r--  arch/arm64/kvm/va_layout.c      | 4
3 files changed, 6 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 5e6fe5eef3ec..4cc29300f533 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1314,7 +1314,7 @@ static int kvm_init_vector_slots(void)
base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
- if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS))
+ if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
return 0;
if (!has_vhe()) {
@@ -1388,15 +1388,15 @@ static void cpu_hyp_reset(void)
* placed in one of the vector slots, which is executed before jumping
* to the real vectors.
*
- * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
* containing the hardening sequence is mapped next to the idmap page,
* and executed before jumping to the real vectors.
*
- * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
+ * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
* empty slot is selected, mapped next to the idmap page, and
* executed before jumping to the real vectors.
*
- * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
+ * Note that ARM64_SPECTRE_V3A is somewhat incompatible with
* VHE, as we don't have hypervisor-specific mappings. If the system
* is VHE and yet selects this capability, it will be ignored.
*/
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index d0a3660c7256..e3249e2dda09 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -209,8 +209,7 @@ SYM_CODE_END(__kvm_hyp_vector)
.if \indirect != 0
alternative_cb kvm_patch_vector_branch
/*
- * For ARM64_HARDEN_EL2_VECTORS configurations, these NOPs get replaced
- * with:
+ * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
*
* movz x0, #(addr & 0xffff)
* movk x0, #((addr >> 16) & 0xffff), lsl #16
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index cc8e8756600f..02362fa27be4 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -139,10 +139,8 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
BUG_ON(nr_inst != 4);
- if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) ||
- WARN_ON_ONCE(has_vhe())) {
+ if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
return;
- }
/*
* Compute HYP VA by using the same computation as kern_hyp_va()