author	Vishal Verma <vishal.l.verma@intel.com>	2025-03-18 00:35:08 -0600
committer	Sean Christopherson <seanjc@google.com>	2025-05-02 13:37:25 -0700
commit	1a81d9d5a1da862ccc49cedec1df603aafa7c600 (patch)
tree	f2578ed45573cb94a635b14aa0c52c3ba9cfb2c3
parent	84ad4d834ce98542ad2910cecb409ba8fcfffa4b (diff)
KVM: VMX: Define a VMX glue macro for kvm_complete_insn_gp()
Define kvm_complete_insn_gp() as vmx_complete_emulated_msr() and use the
glue wrapper in vt_complete_emulated_msr() so that VT's
.complete_emulated_msr() implementation follows the soon-to-be-standard
pattern of:

  vt_abc:
    if (is_td())
      return tdx_abc();
    return vmx_abc();

This will allow generating such wrappers via a macro, which in turn will
make it trivially easy to skip the wrappers entirely when KVM_INTEL_TDX=n.

Suggested-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/kvm/Z6v9yjWLNTU6X90d@google.com/
Cc: Sean Christopherson <seanjc@google.com>
Cc: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
Link: https://lore.kernel.org/r/20250318-vverma7-cleanup_x86_ops-v2-3-701e82d6b779@intel.com
[sean: massage shortlog+changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
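A minimal sketch of the resulting code, using only the names that appear in
the diff below (illustrative, not additional code from the patch):

/*
 * Illustrative only: with the glue define added below,
 *
 *	#define vmx_complete_emulated_msr kvm_complete_insn_gp
 *
 * the standardized wrapper still resolves to kvm_complete_insn_gp() for
 * non-TDX vCPUs, so behavior is unchanged.
 */
static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{
	if (is_td_vcpu(vcpu))
		return tdx_complete_emulated_msr(vcpu, err);

	return vmx_complete_emulated_msr(vcpu, err);	/* == kvm_complete_insn_gp(vcpu, err) */
}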
-rw-r--r--	arch/x86/kvm/vmx/main.c		2
-rw-r--r--	arch/x86/kvm/vmx/x86_ops.h	1
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index f56bcad2e698..05e0c7c1b66d 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -240,7 +240,7 @@ static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
 	if (is_td_vcpu(vcpu))
 		return tdx_complete_emulated_msr(vcpu, err);
 
-	return kvm_complete_insn_gp(vcpu, err);
+	return vmx_complete_emulated_msr(vcpu, err);
 }
 
 #ifdef CONFIG_KVM_SMM
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 6bf8be570b2e..d3b58496ffd0 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -57,6 +57,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
 int vmx_get_feature_msr(u32 msr, u64 *data);
 int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+#define vmx_complete_emulated_msr kvm_complete_insn_gp
 u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
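As a follow-up illustration only: once every vt_*() wrapper follows this
shape, such wrappers could be generated by a macro along the lines of the
sketch below. The macro name and argument-forwarding scheme are invented
for this sketch (they are not part of this patch or series), and it assumes
an int return type and a parameter named vcpu.

/* Hypothetical sketch only; VT_TDX_VMX_OP is a made-up name. */
#define VT_TDX_VMX_OP(name, proto, args)				\
static int vt_##name proto						\
{									\
	if (is_td_vcpu(vcpu))						\
		return tdx_##name args;					\
	return vmx_##name args;						\
}

/* Would expand to a wrapper equivalent to the hand-written one in main.c: */
VT_TDX_VMX_OP(complete_emulated_msr, (struct kvm_vcpu *vcpu, int err), (vcpu, err))

With KVM_INTEL_TDX=n, the same scheme would let the wrappers be skipped
entirely, since the glue define already makes vmx_complete_emulated_msr()
usable directly as the .complete_emulated_msr() callback.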