path: root/arch/x86/kvm/vmx
author     Paolo Bonzini <pbonzini@redhat.com>    2020-04-23 11:02:36 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2020-05-13 12:14:31 -0400
commit     a9fa7cb6aa997ba58294f1a07d402ce5855bafe1 (patch)
tree       2c1b49be8347793f84747492bdde4c7e3aea3814 /arch/x86/kvm/vmx
parent     88c604b66eb6a74841cd40f5bdf639112ad69115 (diff)
KVM: x86: replace is_smm checks with kvm_x86_ops.smi_allowed
Do not hardcode is_smm so that all the architectural conditions for blocking SMIs are listed in a single place. Well, in two places because this introduces some code duplication between Intel and AMD. This ensures that nested SVM obeys GIF in kvm_vcpu_has_events.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
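The caller-side half of this change lives in arch/x86/kvm/x86.c and is therefore filtered out of the view below. As a rough sketch of the idea only, not the literal hunk, the pending-SMI check in kvm_vcpu_has_events stops open-coding !is_smm() and defers to the vendor callback, which is what lets nested SVM fold its GIF check into the same decision:

	/* Sketch, not the exact x86.c hunk (that file is outside this filter). */
	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
	    (vcpu->arch.smi_pending && kvm_x86_ops.smi_allowed(vcpu)))
		return true;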
Diffstat (limited to 'arch/x86/kvm/vmx')
-rw-r--r--  arch/x86/kvm/vmx/vmx.c  |  2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4ca1f1f4ef1a..d0bea514d447 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7680,7 +7680,7 @@ static bool vmx_smi_allowed(struct kvm_vcpu *vcpu)
/* we need a nested vmexit to enter SMM, postpone if run is pending */
if (to_vmx(vcpu)->nested.nested_run_pending)
return false;
- return true;
+ return !is_smm(vcpu);
}
static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
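For comparison, the AMD side (arch/x86/kvm/svm, also outside this filter) is where the duplication mentioned in the commit message shows up: svm_smi_allowed already refuses SMIs while GIF is clear, and with this patch it likewise ends in the !is_smm() test. A sketch assuming the existing gif_set()/to_svm() helpers, not the literal hunk:

	static bool svm_smi_allowed(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/* Per APM Vol.2 15.22.2 "Response to SMI": SMIs are blocked while GIF=0 */
		if (!gif_set(svm))
			return false;

		/* nested-SVM SMI intercept handling elided */

		return !is_smm(vcpu);
	}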