author		Sean Christopherson <sean.j.christopherson@intel.com>	2020-09-23 11:04:05 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2020-09-28 07:58:00 -0400
commit		bd65ba82b324e1765121f3602f9b0a89b7aa1c08 (patch)
tree		48ac47bdc93cd9a9ab91cada4ffc4f5b15378e2a /arch/x86/kvm/vmx/vmx.c
parent		86e3e494fe32d1e7e9180458d857916155dd2856 (diff)
KVM: VMX: Add vmx_setup_uret_msr() to handle lookup and swap
Add vmx_setup_uret_msr() to wrap the lookup and manipulation of the uret
MSRs array during setup_msrs(). In addition to consolidating code, this
eliminates move_msr_up(), which, while being a very literal description
of the function, isn't exactly helpful in understanding the net effect
of the code.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923180409.32255-12-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx/vmx.c	49
1 file changed, 18 insertions(+), 31 deletions(-)
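For readers skimming the patch, the net effect of the new helper is a swap-to-front pattern: look an entry up by key and, if present, swap it into the next active slot while bumping the active count, so the active entries stay contiguous at the start of the array. Below is a minimal, self-contained C sketch of that pattern under stated assumptions: struct msr_entry, struct msr_array, find_entry(), setup_entry(), and the MSR numbers are illustrative stand-ins, not the kernel's struct vmx_uret_msr, __vmx_find_uret_msr(), or vmx_setup_uret_msr().

#include <stdio.h>

/* Illustrative stand-in for struct vmx_uret_msr; not the kernel type. */
struct msr_entry {
	unsigned int index;	/* MSR number */
	unsigned long data;	/* cached value */
};

#define NR_MSRS 4

struct msr_array {
	struct msr_entry entries[NR_MSRS];
	int nr_active;		/* entries[0..nr_active) are the active set */
};

/* Linear lookup by MSR number, analogous to __vmx_find_uret_msr(). */
static int find_entry(const struct msr_array *a, unsigned int msr)
{
	for (int i = 0; i < NR_MSRS; i++)
		if (a->entries[i].index == msr)
			return i;
	return -1;
}

/*
 * Analogous to the new vmx_setup_uret_msr(): look the MSR up and, if
 * found, swap it into the next active slot at the front of the array.
 * Quietly doing nothing on a failed lookup replaces the caller-side
 * "if (index >= 0)" checks that the patch deletes.
 */
static void setup_entry(struct msr_array *a, unsigned int msr)
{
	int from = find_entry(a, msr);

	if (from < 0)
		return;

	int to = a->nr_active++;
	struct msr_entry tmp = a->entries[to];

	a->entries[to] = a->entries[from];
	a->entries[from] = tmp;
}

int main(void)
{
	/* Arbitrary MSR numbers, for illustration only. */
	struct msr_array a = {
		.entries = {
			{ 0x00000174, 0 },
			{ 0xc0000081, 0 },
			{ 0x000001a2, 0 },
			{ 0xc0000102, 0 },
		},
	};

	/* Activate two entries; they end up contiguous at the front. */
	setup_entry(&a, 0xc0000081);
	setup_entry(&a, 0x000001a2);
	setup_entry(&a, 0xdeadbeef);	/* unknown MSR: silently skipped */

	for (int i = 0; i < a.nr_active; i++)
		printf("active[%d] = %#x\n", i, a.entries[i].index);
	return 0;
}

The quiet return on a failed lookup mirrors the patch's "if (from < 0) return;", which is what lets setup_msrs() drop all of its per-MSR "index >= 0" checks, as the diff below shows.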
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 1ad9faca44ef..3300e373fadf 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1614,12 +1614,15 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
 	vmx_clear_hlt(vcpu);
 }
 
-/*
- * Swap MSR entry in host/guest MSR entry array.
- */
-static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
+static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
 {
 	struct vmx_uret_msr tmp;
+	int from, to;
+
+	from = __vmx_find_uret_msr(vmx, msr);
+	if (from < 0)
+		return;
+	to = vmx->nr_active_uret_msrs++;
 
 	tmp = vmx->guest_uret_msrs[to];
 	vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from];
@@ -1633,42 +1636,26 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
  */
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
-	int nr_active_uret_msrs, index;
-
-	nr_active_uret_msrs = 0;
+	vmx->guest_uret_msrs_loaded = false;
+	vmx->nr_active_uret_msrs = 0;
 #ifdef CONFIG_X86_64
 	/*
 	 * The SYSCALL MSRs are only needed on long mode guests, and only
 	 * when EFER.SCE is set.
 	 */
 	if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
-		index = __vmx_find_uret_msr(vmx, MSR_STAR);
-		if (index >= 0)
-			move_msr_up(vmx, index, nr_active_uret_msrs++);
-		index = __vmx_find_uret_msr(vmx, MSR_LSTAR);
-		if (index >= 0)
-			move_msr_up(vmx, index, nr_active_uret_msrs++);
-		index = __vmx_find_uret_msr(vmx, MSR_SYSCALL_MASK);
-		if (index >= 0)
-			move_msr_up(vmx, index, nr_active_uret_msrs++);
+		vmx_setup_uret_msr(vmx, MSR_STAR);
+		vmx_setup_uret_msr(vmx, MSR_LSTAR);
+		vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK);
 	}
 #endif
-	if (update_transition_efer(vmx)) {
-		index = __vmx_find_uret_msr(vmx, MSR_EFER);
-		if (index >= 0)
-			move_msr_up(vmx, index, nr_active_uret_msrs++);
-	}
-	if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) {
-		index = __vmx_find_uret_msr(vmx, MSR_TSC_AUX);
-		if (index >= 0)
-			move_msr_up(vmx, index, nr_active_uret_msrs++);
-	}
-	index = __vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
-	if (index >= 0)
-		move_msr_up(vmx, index, nr_active_uret_msrs++);
+	if (update_transition_efer(vmx))
+		vmx_setup_uret_msr(vmx, MSR_EFER);
 
-	vmx->nr_active_uret_msrs = nr_active_uret_msrs;
-	vmx->guest_uret_msrs_loaded = false;
+	if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
+		vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
+
+	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
 
 	if (cpu_has_vmx_msr_bitmap())
 		vmx_update_msr_bitmap(&vmx->vcpu);
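A closing note on the rebuild semantics visible in setup_msrs() above: because the helper increments the active count itself and tolerates a failed lookup, resetting the counter to zero and re-invoking the helper for each MSR the current guest configuration needs is enough to rebuild the active set from scratch. In terms of the hypothetical sketch near the top of the page (setup_entry() and struct msr_array are illustrative, as are the flag and MSR numbers):

/*
 * Continues the sketch above: rebuild the active set from scratch,
 * mirroring the shape of the refactored setup_msrs().
 */
static void rebuild(struct msr_array *a, int guest_has_rdtscp)
{
	a->nr_active = 0;		/* discard the previous selection */

	setup_entry(a, 0xc0000081);	/* always considered */
	if (guest_has_rdtscp)		/* gated on a guest capability */
		setup_entry(a, 0xc0000102);
}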