 arch/arm/include/asm/kvm_host.h   |  3 +++
 arch/arm64/include/asm/kvm_host.h |  2 ++
 arch/arm64/kvm/handle_exit.c      | 18 +++++++++++++++++-
 virt/kvm/arm/arm.c                |  3 +++
 4 files changed, 25 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index b86fc4162539..acbf9ec7b396 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -238,6 +238,9 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
+static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index) {}
+
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 84fcb2a896a1..abcfd164e690 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -347,6 +347,8 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index);
int kvm_perf_init(void);
int kvm_perf_teardown(void);
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 304203fa9e33..6a5a5db4292f 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -29,12 +29,19 @@
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
#include <asm/debug-monitors.h>
+#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
+{
+ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
+ kvm_inject_vabt(vcpu);
+}
+
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
@@ -252,7 +259,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
case ARM_EXCEPTION_IRQ:
return 1;
case ARM_EXCEPTION_EL1_SERROR:
- kvm_inject_vabt(vcpu);
/* We may still need to return for single-step */
if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
&& kvm_arm_handle_step_debug(vcpu, run))
@@ -275,3 +281,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
return 0;
}
}
+
+/* For exit types that need handling before we can be preempted */
+void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index)
+{
+ exception_index = ARM_EXCEPTION_CODE(exception_index);
+
+ if (exception_index == ARM_EXCEPTION_EL1_SERROR)
+ kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+}
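
Editor's note: the helper added above leaves an SError alone only when the CPU can describe it as a contained, non-fatal RAS error; anything it cannot interpret, or that is fatal, is forwarded to the guest as a virtual SError via kvm_inject_vabt(). The standalone program below is an illustrative model of that decision, not kernel code; the demo_* names and the three-way severity enum are hypothetical simplifications of the ESR classification done by arm64_is_ras_serror()/arm64_is_fatal_ras_serror() in <asm/traps.h>.

/*
 * Illustrative model (not kernel code) of the decision made by
 * kvm_handle_guest_serror() above.  All demo_* names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_serror_kind {
	DEMO_SERROR_NOT_RAS,	/* SError the CPU cannot describe further */
	DEMO_SERROR_RAS_FATAL,	/* uncontainable/unrecoverable RAS error */
	DEMO_SERROR_RAS_HANDLED,/* contained, non-fatal RAS error */
};

/* Mirrors the structure of kvm_handle_guest_serror(): only a contained,
 * non-fatal RAS SError is left alone; everything else becomes a virtual
 * SError for the guest (kvm_inject_vabt() in the real code). */
static bool demo_must_inject_vserror(enum demo_serror_kind kind)
{
	return kind == DEMO_SERROR_NOT_RAS || kind == DEMO_SERROR_RAS_FATAL;
}

int main(void)
{
	static const char *const name[] = {
		"not a RAS SError", "fatal RAS SError", "handled RAS SError",
	};
	enum demo_serror_kind k;

	for (k = DEMO_SERROR_NOT_RAS; k <= DEMO_SERROR_RAS_HANDLED; k++)
		printf("%-20s -> %s\n", name[k],
		       demo_must_inject_vserror(k) ?
		       "inject virtual SError" : "nothing to do");
	return 0;
}
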
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 38e81631fc91..15bf026eb182 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -763,6 +763,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
guest_exit();
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+ /* Exit types that need handling before we can be preempted */
+ handle_exit_early(vcpu, run, ret);
+
preempt_enable();
ret = handle_exit(vcpu, run, ret);
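
Editor's note: the arm.c hunk pins the new hook to a fixed point in the exit path: handle_exit_early() is called after guest_exit() but before preempt_enable(), so exit types that must be dealt with before the vCPU thread can be preempted (here, guest SErrors) are handled first, while handle_exit() continues to run with preemption enabled. The standalone program below models only that ordering; the demo_* names are hypothetical and everything else that kvm_arch_vcpu_ioctl_run() does around these calls is elided.

/*
 * Standalone model (not kernel code) of the ordering established above:
 * handle_exit_early() runs before preempt_enable(), handle_exit() after.
 */
#include <stdio.h>

#define DEMO_EXCEPTION_EL1_SERROR	1
#define DEMO_EXCEPTION_IRQ		2

static void demo_preempt_disable(void) { puts("preempt_disable()"); }
static void demo_preempt_enable(void)  { puts("preempt_enable()"); }

/* Stands in for handle_exit_early(): must run before preemption is
 * re-enabled, e.g. to classify a guest SError. */
static void demo_handle_exit_early(int exit_code)
{
	if (exit_code == DEMO_EXCEPTION_EL1_SERROR)
		puts("  handle_exit_early: classify SError");
}

/* Stands in for handle_exit(): the bulk of exit handling, which may
 * run preemptibly. */
static int demo_handle_exit(int exit_code)
{
	printf("  handle_exit(%d)\n", exit_code);
	return 1;	/* 1 == go back into the guest */
}

/* One iteration of the vcpu run loop, reduced to the ordering of interest. */
static int demo_vcpu_run_once(int exit_code)
{
	int ret;

	demo_preempt_disable();
	puts("  ... run guest, take exit ...");
	demo_handle_exit_early(exit_code);	/* before preempt_enable() */
	demo_preempt_enable();
	ret = demo_handle_exit(exit_code);	/* after preempt_enable() */
	return ret;
}

int main(void)
{
	demo_vcpu_run_once(DEMO_EXCEPTION_EL1_SERROR);
	demo_vcpu_run_once(DEMO_EXCEPTION_IRQ);
	return 0;
}
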