author    Marc Zyngier <maz@kernel.org>  2020-09-30 14:05:35 +0100
committer Marc Zyngier <maz@kernel.org>  2020-09-30 14:05:35 +0100
commit    14ef9d04928b61d699fd0dd858b14b5d8150113e (patch)
tree      267d772b7068f7d0da04cf421286c65f2599f472 /arch/arm64/kvm
parent    2e02cbb236d6ac12e04629f94b7032a94f968165 (diff)
parent    816c347f3a48fb15370b23664760ea61286fea05 (diff)
Merge branch 'kvm-arm64/hyp-pcpu' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/Kconfig                      |   3
-rw-r--r--  arch/arm64/kvm/arm.c                        | 118
-rw-r--r--  arch/arm64/kvm/hyp/Makefile                 |   3
-rw-r--r--  arch/arm64/kvm/hyp/entry.S                  |   6
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S              |  31
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/debug-sr.h   |   4
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h     |  35
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/.gitignore          |   2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/Makefile            |  60
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp.lds.S           |  19
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c            |  15
-rw-r--r--  arch/arm64/kvm/hyp/vhe/switch.c             |  13
-rw-r--r--  arch/arm64/kvm/hyp/vhe/sysreg-sr.c          |   4
-rw-r--r--  arch/arm64/kvm/hypercalls.c                 |  33
-rw-r--r--  arch/arm64/kvm/pmu.c                        |  13
-rw-r--r--  arch/arm64/kvm/psci.c                       |  74
-rw-r--r--  arch/arm64/kvm/reset.c                      |   4
-rw-r--r--  arch/arm64/kvm/sys_regs.c                   |   3
18 files changed, 229 insertions, 211 deletions
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 318c8f2df245..043756db8f6e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -57,9 +57,6 @@ config KVM_ARM_PMU
Adds support for a virtual Performance Monitoring Unit (PMU) in
virtual machines.
-config KVM_INDIRECT_VECTORS
- def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
-
endif # KVM
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e49189012af1..f56122eedffc 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -46,10 +46,10 @@
__asm__(".arch_extension virt");
#endif
-DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
-DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
-DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
+
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
@@ -1263,6 +1263,53 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
}
+static unsigned long nvhe_percpu_size(void)
+{
+ return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
+ (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
+}
+
+static unsigned long nvhe_percpu_order(void)
+{
+ unsigned long size = nvhe_percpu_size();
+
+ return size ? get_order(size) : 0;
+}
+
+static int kvm_map_vectors(void)
+{
+ /*
+ * SV2 = ARM64_SPECTRE_V2
+ * HEL2 = ARM64_HARDEN_EL2_VECTORS
+ *
+ * !SV2 + !HEL2 -> use direct vectors
+ * SV2 + !HEL2 -> use hardened vectors in place
+ * !SV2 + HEL2 -> allocate one vector slot and use exec mapping
+ * SV2 + HEL2 -> use hardened vectors and use exec mapping
+ */
+ if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+ __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+ __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+ }
+
+ if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+ phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+ unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+ /*
+ * Always allocate a spare vector slot, as we don't
+ * know yet which CPUs have a BP hardening slot that
+ * we can reuse.
+ */
+ __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+ BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+ return create_hyp_exec_mappings(vect_pa, size,
+ &__kvm_bp_vect_base);
+ }
+
+ return 0;
+}
+
static void cpu_init_hyp_mode(void)
{
phys_addr_t pgd_ptr;
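
The nvhe_percpu_size()/nvhe_percpu_order() helpers added above measure the nVHE per-cpu image from the __per_cpu_start/__per_cpu_end linker symbols and round the result up to a power-of-two number of pages for alloc_pages(). A minimal userspace sketch of that rounding, assuming 4 KiB pages (this get_order() is a stand-in for the kernel's):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for the kernel's get_order(): smallest n such that
 * (PAGE_SIZE << n) >= size. */
static unsigned int get_order(unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	/* A 6 KiB per-cpu image needs two pages, i.e. an order-1 allocation. */
	printf("order(6144) = %u\n", get_order(6144)); /* prints 1 */
	return 0;
}
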
@@ -1279,8 +1326,8 @@ static void cpu_init_hyp_mode(void)
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
*/
- tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
- (unsigned long)kvm_ksym_ref(&kvm_host_data));
+ tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+ (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
pgd_ptr = kvm_mmu_get_httbr();
hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
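
The new tpidr_el2 value above is the byte offset from the kernel image's copy of the nVHE per-cpu section to the copy allocated for this CPU, so EL2 code can reach a per-cpu variable as "symbol address + tpidr_el2" (the adr_this_cpu pattern). A sketch of the arithmetic with invented 64-bit addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented addresses, not real kernel layout. */
	uint64_t image_percpu_start = 0xffff800010a00000ULL; /* __per_cpu_start in the image */
	uint64_t cpu_percpu_copy    = 0xffff000045600000ULL; /* this CPU's allocated copy */
	uint64_t var_in_image       = image_percpu_start + 0x120; /* e.g. kvm_hyp_vector */

	/* tpidr_el2 = this CPU's copy - the image copy (wraps harmlessly). */
	uint64_t tpidr_el2 = cpu_percpu_copy - image_percpu_start;

	/* EL2's adr_this_cpu then computes: symbol address + tpidr_el2. */
	uint64_t runtime_addr = var_in_image + tpidr_el2;

	printf("%#llx\n", (unsigned long long)runtime_addr);
	return 0;
}
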
@@ -1303,7 +1350,7 @@ static void cpu_init_hyp_mode(void)
* at EL2.
*/
if (this_cpu_has_cap(ARM64_SSBS) &&
- arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+ arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
kvm_call_hyp_nvhe(__kvm_enable_ssbs);
}
}
@@ -1316,11 +1363,11 @@ static void cpu_hyp_reset(void)
static void cpu_hyp_reinit(void)
{
- kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
+ kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
cpu_hyp_reset();
- __this_cpu_write(kvm_hyp_vector, (unsigned long)kvm_get_hyp_vector());
+ *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();
if (is_kernel_in_hyp_mode())
kvm_timer_init_vhe();
@@ -1472,8 +1519,10 @@ static void teardown_hyp_mode(void)
int cpu;
free_hyp_pgds();
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+ free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
+ }
}
/**
@@ -1507,6 +1556,24 @@ static int init_hyp_mode(void)
}
/*
+ * Allocate and initialize pages for Hypervisor-mode percpu regions.
+ */
+ for_each_possible_cpu(cpu) {
+ struct page *page;
+ void *page_addr;
+
+ page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
+ if (!page) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ page_addr = page_address(page);
+ memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
+ kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
+ }
+
+ /*
* Map the Hyp-code called directly from the host
*/
err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
@@ -1550,40 +1617,21 @@ static int init_hyp_mode(void)
}
}
+ /*
+ * Map Hyp percpu pages
+ */
for_each_possible_cpu(cpu) {
- struct kvm_host_data *cpu_data;
- struct kvm_cpu_context *hyp_ctxt;
- unsigned long *vector;
+ char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
+ char *percpu_end = percpu_begin + nvhe_percpu_size();
- cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
- err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
+ err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
if (err) {
- kvm_err("Cannot map host CPU state: %d\n", err);
- goto out_err;
- }
-
- hyp_ctxt = per_cpu_ptr(&kvm_hyp_ctxt, cpu);
- err = create_hyp_mappings(hyp_ctxt, hyp_ctxt + 1, PAGE_HYP);
-
- if (err) {
- kvm_err("Cannot map hyp context: %d\n", err);
- goto out_err;
- }
-
- vector = per_cpu_ptr(&kvm_hyp_vector, cpu);
- err = create_hyp_mappings(vector, vector + 1, PAGE_HYP);
-
- if (err) {
- kvm_err("Cannot map hyp guest vector address\n");
+ kvm_err("Cannot map hyp percpu region\n");
goto out_err;
}
}
- err = hyp_map_aux_data();
- if (err)
- kvm_err("Cannot map host auxiliary data: %d\n", err);
-
return 0;
out_err:
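
init_hyp_mode() now gives each possible CPU a private, writable copy of the initial nVHE per-cpu data by allocating pages and memcpy'ing the section image into them. A userspace analogue of that allocate-and-seed pattern (sizes, CPU count and image contents invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS   4
#define PAGE_SIZE 4096UL

/* Stand-in for the image between __per_cpu_start and __per_cpu_end. */
static const char percpu_image[256] = { 1, 2, 3 };

static unsigned long percpu_base[NR_CPUS];

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* alloc_pages() analogue: page-aligned, whole pages. */
		void *copy = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!copy)
			return 1; /* the kernel code unwinds with -ENOMEM instead */

		/* Seed the copy with the initial per-cpu data. */
		memcpy(copy, percpu_image, sizeof(percpu_image));
		percpu_base[cpu] = (unsigned long)copy;
	}
	printf("cpu0 base: %#lx\n", percpu_base[0]);

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		free((void *)percpu_base[cpu]); /* teardown_hyp_mode() analogue */
	return 0;
}
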
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 607b8a898826..4a81eddabcd8 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
-DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN)
-obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
-obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o smccc_wa.o
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index afaa8d1f2485..b0afad7a99c6 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -25,7 +25,7 @@ SYM_FUNC_START(__guest_enter)
// x1-x17: clobbered by macros
// x29: guest context
- hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2
+ adr_this_cpu x1, kvm_hyp_ctxt, x2
// Store the hyp regs
save_callee_saved_regs x1
@@ -93,7 +93,7 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
// return address to tail call into hyp_panic. As a side effect, the
// current state is saved to the guest context but it will only be
// accurate if the guest had been completely restored.
- hyp_adr_this_cpu x0, kvm_hyp_ctxt, x1
+ adr_this_cpu x0, kvm_hyp_ctxt, x1
adr x1, hyp_panic
str x1, [x0, #CPU_XREG_OFFSET(30)]
@@ -131,7 +131,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// Store the guest's sp_el0
save_sp_el0 x1, x2
- hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3
+ adr_this_cpu x2, kvm_hyp_ctxt, x3
// Macro ptrauth_switch_to_hyp format:
// ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index bc9f53df46f5..0a5b36eb54b3 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -63,35 +63,6 @@ el1_sync: // Guest trapped into EL2
ARM_SMCCC_ARCH_WORKAROUND_2)
cbnz w1, el1_trap
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb arm64_enable_wa2_handling
- b wa2_end
-alternative_cb_end
- get_vcpu_ptr x2, x0
- ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
- // Sanitize the argument and update the guest flags
- ldr x1, [sp, #8] // Guest's x1
- clz w1, w1 // Murphy's device:
- lsr w1, w1, #5 // w1 = !!w1 without using
- eor w1, w1, #1 // the flags...
- bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
- str x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
- /* Check that we actually need to perform the call */
- hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
- cbz x0, wa2_end
-
- mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
- smc #0
-
- /* Don't leak data from the SMC call */
- mov x3, xzr
-wa2_end:
- mov x2, xzr
- mov x1, xzr
-#endif
-
wa_epilogue:
mov x0, xzr
add sp, sp, #16
@@ -216,7 +187,6 @@ SYM_CODE_START(__kvm_hyp_vector)
valid_vect el1_error // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
.align 7
1: esb
@@ -266,4 +236,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)
-#endif
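
The ARM_SMCCC_ARCH_WORKAROUND_2 fast path deleted above normalised the guest's x1 argument to 0 or 1 without consuming the condition flags: for a 32-bit value, clz(w1) >> 5 is 1 exactly when w1 is zero, so a final eor with 1 yields !!w1. The same computation in C (note AArch64's clz of 0 is a well-defined 32, unlike C's __builtin_clz):

#include <stdio.h>
#include <stdint.h>

/* Mimic the AArch64 CLZ instruction, which returns 32 for an input of 0. */
static unsigned int clz32(uint32_t x)
{
	return x ? (unsigned int)__builtin_clz(x) : 32;
}

/* "Murphy's device": compute !!x without using the flags. */
static uint32_t normalize(uint32_t x)
{
	return (clz32(x) >> 5) ^ 1;
}

int main(void)
{
	printf("%u %u %u\n", normalize(0), normalize(1), normalize(0xffffffff));
	/* prints: 0 1 1 */
	return 0;
}
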
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index 5e28ea6aa097..4ebe9f558f3a 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -135,7 +135,7 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -154,7 +154,7 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 4536b50ddc06..eeac62b685a9 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -383,7 +383,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
!esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
return false;
- ctxt = __hyp_this_cpu_ptr(kvm_hyp_ctxt);
+ ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
__ptrauth_save_key(ctxt, APIA);
__ptrauth_save_key(ctxt, APIB);
__ptrauth_save_key(ctxt, APDA);
@@ -476,39 +476,6 @@ exit:
return false;
}
-static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
-{
- if (!cpus_have_final_cap(ARM64_SSBD))
- return false;
-
- return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
-}
-
-static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
- /*
- * The host runs with the workaround always present. If the
- * guest wants it disabled, so be it...
- */
- if (__needs_ssbd_off(vcpu) &&
- __hyp_this_cpu_read(arm64_ssbd_callback_required))
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-#endif
-}
-
-static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
- /*
- * If the guest has disabled the workaround, bring it back on.
- */
- if (__needs_ssbd_off(vcpu) &&
- __hyp_this_cpu_read(arm64_ssbd_callback_required))
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-#endif
-}
-
static inline void __kvm_unexpected_el2_exception(void)
{
extern char __guest_exit_panic[];
diff --git a/arch/arm64/kvm/hyp/nvhe/.gitignore b/arch/arm64/kvm/hyp/nvhe/.gitignore
new file mode 100644
index 000000000000..695d73d0249e
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+hyp.lds
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 46c89e8c30bc..ddde15fe85f2 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -10,40 +10,46 @@ obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o hyp-
obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o
-obj-y := $(patsubst %.o,%.hyp.o,$(obj-y))
-extra-y := $(patsubst %.hyp.o,%.hyp.tmp.o,$(obj-y))
+##
+## Build rules for compiling nVHE hyp code
+## Output of this folder is `kvm_nvhe.o`, a partially linked object
+## file containing all nVHE hyp code and data.
+##
-$(obj)/%.hyp.tmp.o: $(src)/%.c FORCE
+hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y))
+obj-y := kvm_nvhe.o
+extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds
+
+# 1) Compile all source files to `.nvhe.o` object files. The file extension
+# avoids file name clashes for files shared with VHE.
+$(obj)/%.nvhe.o: $(src)/%.c FORCE
$(call if_changed_rule,cc_o_c)
-$(obj)/%.hyp.tmp.o: $(src)/%.S FORCE
+$(obj)/%.nvhe.o: $(src)/%.S FORCE
$(call if_changed_rule,as_o_S)
-$(obj)/%.hyp.o: $(obj)/%.hyp.tmp.o FORCE
- $(call if_changed,hypcopy)
-# Disable reordering functions by GCC (enabled at -O2).
-# This pass puts functions into '.text.*' sections to aid the linker
-# in optimizing ELF layout. See HYPCOPY comment below for more info.
-ccflags-y += $(call cc-option,-fno-reorder-functions)
+# 2) Compile linker script.
+$(obj)/hyp.lds: $(src)/hyp.lds.S FORCE
+ $(call if_changed_dep,cpp_lds_S)
+
+# 3) Partially link all '.nvhe.o' files and apply the linker script.
+# Prefixes names of ELF sections with '.hyp', eg. '.hyp.text'.
+# Note: The following rule assumes that the 'ld' rule puts LDFLAGS before
+# the list of dependencies to form '-T $(obj)/hyp.lds'. This is to
+# keep the dependency on the target while avoiding an error from
+# GNU ld if the linker script is passed to it twice.
+LDFLAGS_kvm_nvhe.tmp.o := -r -T
+$(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
+ $(call if_changed,ld)
+
+# 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
+# Prefixes names of ELF symbols with '__kvm_nvhe_'.
+$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE
+ $(call if_changed,hypcopy)
# The HYPCOPY command uses `objcopy` to prefix all ELF symbol names
-# and relevant ELF section names to avoid clashes with VHE code/data.
-#
-# Hyp code is assumed to be in the '.text' section of the input object
-# files (with the exception of specialized sections such as
-# '.hyp.idmap.text'). This assumption may be broken by a compiler that
-# divides code into sections like '.text.unlikely' so as to optimize
-# ELF layout. HYPCOPY checks that no such sections exist in the input
-# using `objdump`, otherwise they would be linked together with other
-# kernel code and not memory-mapped correctly at runtime.
+# to avoid clashes with VHE code/data.
quiet_cmd_hypcopy = HYPCOPY $@
- cmd_hypcopy = \
- if $(OBJDUMP) -h $< | grep -F '.text.'; then \
- echo "$@: function reordering not supported in nVHE hyp code" >&2; \
- /bin/false; \
- fi; \
- $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ \
- --rename-section=.text=.hyp.text \
- $< $@
+ cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@
# Remove ftrace and Shadow Call Stack CFLAGS.
# This is equivalent to the 'notrace' and '__noscs' annotations.
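
Since HYPCOPY now prefixes every nVHE symbol with __kvm_nvhe_, kernel-proper code names hyp symbols via token pasting. A compilable sketch of the idea, loosely modelled on the kernel's kvm_nvhe_sym()/CHOOSE_NVHE_SYM() macros (the handler functions here are invented):

#include <stdio.h>

/* Paste the prefix that HYPCOPY's objcopy applies onto a symbol name. */
#define kvm_nvhe_sym(sym) __kvm_nvhe_##sym

/* Two "builds" of a handler, as the VHE and nVHE objects would carry. */
static int vhe_handler(void)                 { return 1; }
static int kvm_nvhe_sym(nvhe_handler)(void)  { return 2; }

/* Pick the prefixed copy, the way hyp code is chosen on non-VHE systems. */
#define CHOOSE_NVHE_SYM(sym) kvm_nvhe_sym(sym)

int main(void)
{
	printf("%d %d\n", vhe_handler(), CHOOSE_NVHE_SYM(nvhe_handler)());
	return 0;
}
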
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
new file mode 100644
index 000000000000..bb2d986ff696
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Google LLC.
+ * Written by David Brazdil <dbrazdil@google.com>
+ *
+ * Linker script used for partial linking of nVHE EL2 object files.
+ */
+
+#include <asm/hyp_image.h>
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/memory.h>
+
+SECTIONS {
+ HYP_SECTION(.text)
+ HYP_SECTION_NAME(.data..percpu) : {
+ PERCPU_INPUT(L1_CACHE_BYTES)
+ }
+}
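
HYP_SECTION and HYP_SECTION_NAME come from the new asm/hyp_image.h and give the collected input sections a '.hyp' prefix, so EL2 text and per-cpu data can be placed and mapped separately from the kernel's. The section-naming effect can be sketched with a plain section attribute (the section and symbol names here are illustrative):

#include <stdio.h>

/* Drop a function into a dedicated ELF section, the way the linker
 * script above groups nVHE code under '.hyp.text'. 'used' keeps the
 * symbol even if nothing else references it. */
__attribute__((section(".hyp.text"), used))
static int hyp_demo(void)
{
	return 42;
}

int main(void)
{
	/* 'objdump -h' on the binary will show a .hyp.text section. */
	printf("%d\n", hyp_demo());
	return 0;
}
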
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index a29f247f35e3..a457a0306e03 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -27,6 +27,11 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
+/* Non-VHE specific context */
+DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
@@ -42,7 +47,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
}
write_sysreg(val, cptr_el2);
- write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2);
+ write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
@@ -176,7 +181,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
pmr_sync();
}
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
@@ -203,8 +208,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__debug_switch_to_guest(vcpu);
- __set_guest_arch_workaround_state(vcpu);
-
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu);
@@ -212,8 +215,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
- __set_host_arch_workaround_state(vcpu);
-
__sysreg_save_state_nvhe(guest_ctxt);
__sysreg32_save_state(vcpu);
__timer_disable_traps(vcpu);
@@ -254,7 +255,7 @@ void __noreturn hyp_panic(void)
struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
vcpu = host_ctxt->__hyp_running_vcpu;
if (vcpu) {
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index cf477f856e51..fe69de16dadc 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -28,6 +28,11 @@
const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
+/* VHE specific context */
+DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
@@ -108,7 +113,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
@@ -131,8 +136,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
- __set_guest_arch_workaround_state(vcpu);
-
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu);
@@ -140,8 +143,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
- __set_host_arch_workaround_state(vcpu);
-
sysreg_save_guest_state_vhe(guest_ctxt);
__deactivate_traps(vcpu);
@@ -197,7 +198,7 @@ static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
vcpu = host_ctxt->__hyp_running_vcpu;
__deactivate_traps(vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 996471e4c138..2a0b8c88d74f 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -66,7 +66,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
__sysreg_save_user_state(host_ctxt);
/*
@@ -100,7 +100,7 @@ void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();
__sysreg_save_el1_state(guest_ctxt);
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 550dfa3e53cd..9824025ccc5c 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -24,27 +24,36 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
feature = smccc_get_arg1(vcpu);
switch (feature) {
case ARM_SMCCC_ARCH_WORKAROUND_1:
- switch (kvm_arm_harden_branch_predictor()) {
- case KVM_BP_HARDEN_UNKNOWN:
+ switch (arm64_get_spectre_v2_state()) {
+ case SPECTRE_VULNERABLE:
break;
- case KVM_BP_HARDEN_WA_NEEDED:
+ case SPECTRE_MITIGATED:
val = SMCCC_RET_SUCCESS;
break;
- case KVM_BP_HARDEN_NOT_REQUIRED:
+ case SPECTRE_UNAFFECTED:
val = SMCCC_RET_NOT_REQUIRED;
break;
}
break;
case ARM_SMCCC_ARCH_WORKAROUND_2:
- switch (kvm_arm_have_ssbd()) {
- case KVM_SSBD_FORCE_DISABLE:
- case KVM_SSBD_UNKNOWN:
+ switch (arm64_get_spectre_v4_state()) {
+ case SPECTRE_VULNERABLE:
break;
- case KVM_SSBD_KERNEL:
- val = SMCCC_RET_SUCCESS;
- break;
- case KVM_SSBD_FORCE_ENABLE:
- case KVM_SSBD_MITIGATED:
+ case SPECTRE_MITIGATED:
+ /*
+ * SSBS everywhere: Indicate no firmware
+ * support, as the SSBS support will be
+ * indicated to the guest and the default is
+ * safe.
+ *
+ * Otherwise, expose a permanent mitigation
+ * to the guest, and hide SSBS so that the
+ * guest stays protected.
+ */
+ if (cpus_have_final_cap(ARM64_SSBS))
+ break;
+ fallthrough;
+ case SPECTRE_UNAFFECTED:
val = SMCCC_RET_NOT_REQUIRED;
break;
}
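
Seen from a guest, the switch above decides the answer to an SMCCC_ARCH_FEATURES query for ARM_SMCCC_ARCH_WORKAROUND_2: no positive answer while vulnerable, SMCCC_RET_SUCCESS when a callable mitigation exists, SMCCC_RET_NOT_REQUIRED when none is needed. A sketch of a guest interpreting those SMCCC v1.1 return codes (the conduit call is stubbed out):

#include <stdio.h>

#define SMCCC_RET_SUCCESS        0
#define SMCCC_RET_NOT_SUPPORTED  (-1)
#define SMCCC_RET_NOT_REQUIRED   (-2)

/* Stand-in for an SMC/HVC to the hypervisor's ARCH_FEATURES handler. */
static long smccc_arch_features(unsigned int fn)
{
	(void)fn;
	return SMCCC_RET_NOT_REQUIRED; /* canned answer for the sketch */
}

int main(void)
{
	long ret = smccc_arch_features(0x80007fff /* ARCH_WORKAROUND_2 */);

	switch (ret) {
	case SMCCC_RET_SUCCESS:      /* mitigation present and callable */
		puts("firmware mitigation available");
		break;
	case SMCCC_RET_NOT_REQUIRED: /* CPU unaffected (or SSBS covers it) */
		puts("no call needed");
		break;
	default:                     /* NOT_SUPPORTED: treat as vulnerable */
		puts("no firmware mitigation");
		break;
	}
	return 0;
}
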
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 3c224162b3dd..faf32a44ba04 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -31,9 +31,9 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
*/
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
- struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+ struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
- if (!kvm_pmu_switch_needed(attr))
+ if (!ctx || !kvm_pmu_switch_needed(attr))
return;
if (!attr->exclude_host)
@@ -47,7 +47,10 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
*/
void kvm_clr_pmu_events(u32 clr)
{
- struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+ struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+
+ if (!ctx)
+ return;
ctx->pmu_events.events_host &= ~clr;
ctx->pmu_events.events_guest &= ~clr;
@@ -173,7 +176,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
return;
preempt_disable();
- host = this_cpu_ptr(&kvm_host_data);
+ host = this_cpu_ptr_hyp_sym(kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
@@ -193,7 +196,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
if (!has_vhe())
return;
- host = this_cpu_ptr(&kvm_host_data);
+ host = this_cpu_ptr_hyp_sym(kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 83415e96b589..db4056ecccfd 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -425,27 +425,30 @@ static int get_kernel_wa_level(u64 regid)
{
switch (regid) {
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- switch (kvm_arm_harden_branch_predictor()) {
- case KVM_BP_HARDEN_UNKNOWN:
+ switch (arm64_get_spectre_v2_state()) {
+ case SPECTRE_VULNERABLE:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
- case KVM_BP_HARDEN_WA_NEEDED:
+ case SPECTRE_MITIGATED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
- case KVM_BP_HARDEN_NOT_REQUIRED:
+ case SPECTRE_UNAFFECTED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
}
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
- switch (kvm_arm_have_ssbd()) {
- case KVM_SSBD_FORCE_DISABLE:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
- case KVM_SSBD_KERNEL:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
- case KVM_SSBD_FORCE_ENABLE:
- case KVM_SSBD_MITIGATED:
+ switch (arm64_get_spectre_v4_state()) {
+ case SPECTRE_MITIGATED:
+ /*
+ * As for the hypercall discovery, we pretend we
+ * don't have any FW mitigation if SSBS is there at
+ * all times.
+ */
+ if (cpus_have_final_cap(ARM64_SSBS))
+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
+ fallthrough;
+ case SPECTRE_UNAFFECTED:
return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
- case KVM_SSBD_UNKNOWN:
- default:
- return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
+ case SPECTRE_VULNERABLE:
+ return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
}
}
@@ -462,14 +465,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
val = kvm_psci_version(vcpu, vcpu->kvm);
break;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
- break;
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-
- if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
- kvm_arm_get_vcpu_workaround_2_flag(vcpu))
- val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
break;
default:
return -ENOENT;
@@ -527,34 +524,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
return -EINVAL;
- wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;
-
- if (get_kernel_wa_level(reg->id) < wa_level)
- return -EINVAL;
-
/* The enabled bit must not be set unless the level is AVAIL. */
- if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
- wa_level != val)
+ if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
+ (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
return -EINVAL;
- /* Are we finished or do we need to check the enable bit ? */
- if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
- return 0;
-
/*
- * If this kernel supports the workaround to be switched on
- * or off, make sure it matches the requested setting.
+ * Map all the possible incoming states to the only two we
+ * really want to deal with.
*/
- switch (wa_level) {
- case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
- kvm_arm_set_vcpu_workaround_2_flag(vcpu,
- val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
+ switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+ wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
break;
+ case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
- kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
+ wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
break;
+ default:
+ return -EINVAL;
}
+ /*
+ * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
+ * other way around.
+ */
+ if (get_kernel_wa_level(reg->id) < wa_level)
+ return -EINVAL;
+
return 0;
default:
return -ENOENT;
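
The rewritten set-register path leans on the WORKAROUND_2 level constants being ordered from weakest to strongest claim, so a single '<' comparison enforces that userspace may only request a level at or below what the kernel reports. A sketch of that check (the enum order mirrors the UAPI constants; exact numeric values are illustrative):

#include <stdio.h>

/* Same ordering as the KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_* UAPI
 * constants; the numeric values here are illustrative. */
enum wa2_level {
	WA2_NOT_AVAIL,    /* weakest: no mitigation claimed */
	WA2_UNKNOWN,
	WA2_AVAIL,
	WA2_NOT_REQUIRED, /* strongest: CPU unaffected */
};

static int set_wa2_level(enum wa2_level kernel, enum wa2_level requested)
{
	/* NOT_AVAIL on a NOT_REQUIRED host is fine; the reverse is not. */
	if (kernel < requested)
		return -1; /* -EINVAL in the real code */
	return 0;
}

int main(void)
{
	printf("%d\n", set_wa2_level(WA2_NOT_REQUIRED, WA2_NOT_AVAIL)); /* 0 */
	printf("%d\n", set_wa2_level(WA2_NOT_AVAIL, WA2_NOT_REQUIRED)); /* -1 */
	return 0;
}
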
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 2202b710d44c..f32490229a4c 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.reset_state.reset = false;
}
- /* Default workaround setup is enabled (if supported) */
- if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
- vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-
/* Reset timer */
ret = kvm_timer_vcpu_reset(vcpu);
out:
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 20ab2a7d37ca..3c203cb8c103 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1128,6 +1128,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
if (!vcpu_has_sve(vcpu))
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
+ if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
+ arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+ val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
(0xfUL << ID_AA64ISAR1_API_SHIFT) |
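
The read_id_reg() change above advertises CSV2=1 in the guest's view of ID_AA64PFR0_EL1 when the host knows it is unaffected by Spectre-v2, even though the CPU itself reports the field as zero. A standalone sketch of that 4-bit ID-field fixup (CSV2 lives in bits [59:56]; the register value is invented):

#include <stdio.h>
#include <stdint.h>

#define ID_AA64PFR0_CSV2_SHIFT 56 /* CSV2 field is bits [59:56] */

int main(void)
{
	uint64_t val = 0x0000000000000011ULL; /* invented register value */
	int unaffected = 1; /* stand-in for arm64_get_spectre_v2_state() */

	/* Hardware says CSV2 == 0, but the kernel knows the CPU is safe:
	 * report CSV2 = 1 to the guest. */
	if (!(val & (0xfULL << ID_AA64PFR0_CSV2_SHIFT)) && unaffected)
		val |= (1ULL << ID_AA64PFR0_CSV2_SHIFT);

	printf("%#llx\n", (unsigned long long)val);
	return 0;
}
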