author		Marc Zyngier <maz@kernel.org>	2020-10-14 12:12:45 +0100
committer	Marc Zyngier <maz@kernel.org>	2020-11-10 08:34:25 +0000
commit		defe21f49bc98b095300752aa1e19bb608f3e97d (patch)
tree		8b2b5fb1ce92055a1fcde8a08e2076285e28b5bb
parent		cdb5e02ed133731f8a6676a389ed40ca303cab7c (diff)
KVM: arm64: Move PC rollback on SError to HYP
Instead of handling the "PC rollback on SError during HVC" at EL1 (which
requires disclosing PC to a potentially untrusted kernel), let's move this
fixup to ... fixup_guest_exit(), which is where we do all fixups.

Isn't that neat?

Signed-off-by: Marc Zyngier <maz@kernel.org>
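The rule being moved is tiny but easy to get backwards: on an HVC exit,
ELR_EL2 holds the preferred return address, which is the instruction *after*
the HVC, so replaying the hypercall once the SError has been injected means
stepping the PC back by one instruction; a trapped SMC's preferred return
address is the SMC itself, so it needs no adjustment. A minimal sketch of
that rule, in kernel-style C with a hypothetical helper name (not part of
the patch):

	/*
	 * Illustration only: pick the PC to return to after injecting
	 * an SError that raced with a guest exit.
	 */
	static inline unsigned long serror_return_pc(unsigned long elr,
						     bool exit_was_hvc)
	{
		/* AArch64 instructions are 4 bytes: step back over the HVC */
		return exit_was_hvc ? elr - 4 : elr;
	}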
-rw-r--r--	arch/arm64/kvm/handle_exit.c	17
-rw-r--r--	arch/arm64/kvm/hyp/include/hyp/switch.h	15
2 files changed, 15 insertions(+), 17 deletions(-)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index d4e00a864ee6..f79137ee4274 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -241,23 +241,6 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
 {
 	struct kvm_run *run = vcpu->run;
 
-	if (ARM_SERROR_PENDING(exception_index)) {
-		u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
-
-		/*
-		 * HVC already have an adjusted PC, which we need to
-		 * correct in order to return to after having injected
-		 * the SError.
-		 *
-		 * SMC, on the other hand, is *trapped*, meaning its
-		 * preferred return address is the SMC itself.
-		 */
-		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
-			*vcpu_pc(vcpu) -= 4;
-
-		return 1;
-	}
-
 	exception_index = ARM_EXCEPTION_CODE(exception_index);
 
 	switch (exception_index) {
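For reference, ARM_SERROR_PENDING() and ARM_EXCEPTION_CODE() above unpack a
single exit code whose top bit flags a pending SError, which is why
handle_exit() masks the code before switching on it. Their definitions live
in arch/arm64/include/asm/kvm_asm.h; reproduced here from memory, so treat
this as a sketch rather than the authoritative source:

	#define ARM_EXIT_WITH_SERROR_BIT	31
	#define ARM_EXCEPTION_CODE(x)	((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
	#define ARM_SERROR_PENDING(x)	!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))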
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 8b2328f62a07..84473574c2e7 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -411,6 +411,21 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
 
+	if (ARM_SERROR_PENDING(*exit_code)) {
+		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+		/*
+		 * HVC already have an adjusted PC, which we need to
+		 * correct in order to return to after having injected
+		 * the SError.
+		 *
+		 * SMC, on the other hand, is *trapped*, meaning its
+		 * preferred return address is the SMC itself.
+		 */
+		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
+			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
+	}
+
 	/*
 	 * We're using the raw exception code in order to only process
 	 * the trap if no SError is pending. We will come back to the
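One more note on the hyp side: write_sysreg_el2() hides the VHE/nVHE split,
so the rollback lands in the right ELR register whichever mode the host runs
in. Roughly, per arch/arm64/include/asm/kvm_hyp.h (again quoted from memory,
as an assumption to verify against the tree):

	/* nVHE uses the *_EL2 encodings; VHE runs at EL2 with HCR_EL2.E2H
	 * set, which redirects the *_EL1 encodings to the *_EL2 registers.
	 */
	#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
	#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)

The net effect is the same -4 adjustment as before, but now applied to ELR
before the guest context is ever saved, so EL1 no longer needs to see or
rewrite the guest's PC.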