summaryrefslogtreecommitdiff
path: root/arch/x86
diff options
context:
space:
mode:
authorSean Christopherson <sean.j.christopherson@intel.com>2019-01-25 07:41:07 -0800
committerPaolo Bonzini <pbonzini@redhat.com>2019-02-12 13:12:26 +0100
commitf78d0971b7bd5bf4373a1fac27f176af5d5594ed (patch)
tree2bba940c5e657d5d7bc9e76cc4a15752abe23b9b /arch/x86
parent217aaff53c25f03b1d2fc23eff9dc2bae34f690e (diff)
KVM: VMX: Don't save guest registers after VM-Fail
A failed VM-Enter (obviously) didn't succeed, meaning the CPU never executed an instruction in guest mode and so can't have changed the general purpose registers. In addition to saving some instructions in the VM-Fail case, this also provides a separate path entirely and thus an opportunity to propagate the fail condition to vmx->fail via register without introducing undue pain. Using a register, as opposed to directly referencing vmx->fail, eliminates the need to pass the offset of 'fail', which will simplify moving the code to proper assembly in future patches. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kvm/vmx/vmx.c35
1 file changed, 23 insertions, 12 deletions
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 79b42197ed7e..1dcd3f157a70 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6416,6 +6416,7 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
/* Enter guest mode */
"call vmx_vmenter\n\t"
+ "jbe 2f \n\t"
/* Temporarily save guest's RCX. */
"push %%" _ASM_CX " \n\t"
@@ -6423,9 +6424,6 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
/* Reload the vcpu_vmx pointer to RCX. */
"mov %c[wordsize](%%" _ASM_SP "), %%" _ASM_CX " \n\t"
- /* Set vmx->fail based on EFLAGS.{CF,ZF} */
- "setbe %c[fail](%%" _ASM_CX ")\n\t"
-
/* Save all guest registers, including RCX from the stack */
"mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t"
"mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t"
@@ -6443,15 +6441,22 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
"mov %%r13, %c[r13](%%" _ASM_CX ") \n\t"
"mov %%r14, %c[r14](%%" _ASM_CX ") \n\t"
"mov %%r15, %c[r15](%%" _ASM_CX ") \n\t"
+#endif
+
+ /* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
+ "xor %%ebx, %%ebx \n\t"
/*
- * Clear all general purpose registers (except RSP, which is loaded by
- * the CPU during VM-Exit) to prevent speculative use of the guest's
- * values, even those that are saved/loaded via the stack. In theory,
- * an L1 cache miss when restoring registers could lead to speculative
- * execution with the guest's values. Zeroing XORs are dirt cheap,
- * i.e. the extra paranoia is essentially free.
+ * Clear all general purpose registers except RSP and RBX to prevent
+ * speculative use of the guest's values, even those that are reloaded
+ * via the stack. In theory, an L1 cache miss when restoring registers
+ * could lead to speculative execution with the guest's values.
+ * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
+ * free. RSP and RBX are exempt as RSP is restored by hardware during
+ * VM-Exit and RBX is explicitly loaded with 0 or 1 to "return" VM-Fail.
*/
+ "1: \n\t"
+#ifdef CONFIG_X86_64
"xor %%r8d, %%r8d \n\t"
"xor %%r9d, %%r9d \n\t"
"xor %%r10d, %%r10d \n\t"
@@ -6462,7 +6467,6 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
"xor %%r15d, %%r15d \n\t"
#endif
"xor %%eax, %%eax \n\t"
- "xor %%ebx, %%ebx \n\t"
"xor %%ecx, %%ecx \n\t"
"xor %%edx, %%edx \n\t"
"xor %%esi, %%esi \n\t"
@@ -6472,7 +6476,15 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
/* "POP" the vcpu_vmx pointer. */
"add $%c[wordsize], %%" _ASM_SP " \n\t"
"pop %%" _ASM_BP " \n\t"
- : ASM_CALL_CONSTRAINT, "=b"((int){0}),
+ "jmp 3f \n\t"
+
+ /* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
+ "2: \n\t"
+ "mov $1, %%ebx \n\t"
+ "jmp 1b \n\t"
+ "3: \n\t"
+
+ : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
#ifdef CONFIG_X86_64
"=D"((int){0})
: "D"(vmx),
@@ -6481,7 +6493,6 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
: "a"(vmx),
#endif
"b"(vmx->loaded_vmcs->launched),
- [fail]"i"(offsetof(struct vcpu_vmx, fail)),
[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),