author		Sean Christopherson <sean.j.christopherson@intel.com>	2020-09-23 11:04:04 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2020-09-28 07:57:59 -0400
commit		86e3e494fe32d1e7e9180458d857916155dd2856
tree		0c17159204d5b65409c8a691bc3ecbc9be9f90f6 /arch/x86/kvm/vmx/vmx.c
parent		ef1d2ee12e6cd5abbf9986f35238f2fb2cd21a6d
KVM: VMX: Move uret MSR lookup into update_transition_efer()
Move checking for the existence of MSR_EFER in the uret MSR array into update_transition_efer() so that the lookup and manipulation of the array in setup_msrs() occur back-to-back. This paves the way toward adding a helper to wrap the lookup and manipulation.

To avoid unnecessary overhead, defer the lookup until the uret array would actually be modified in update_transition_efer(). EFER obviously exists on CPUs that support the dedicated VMCS fields for switching EFER, and EFER must exist for the guest and host EFER.NX values to diverge, i.e. there is no danger of attempting to read/write EFER when it doesn't exist.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923180409.32255-11-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
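As a rough sketch of the helper this paves the way for, the lookup and the array write could be wrapped together. The helper name vmx_setup_uret_msr() and its signature below are assumptions for illustration, not something introduced by this patch:

	/*
	 * Hypothetical helper (illustrative sketch only): combine the uret
	 * MSR lookup with the data/mask update so that callers never touch
	 * the array index directly.
	 */
	static bool vmx_setup_uret_msr(struct vcpu_vmx *vmx, u32 msr,
				       u64 data, u64 mask)
	{
		int i = __vmx_find_uret_msr(vmx, msr);

		if (i < 0)
			return false;

		vmx->guest_uret_msrs[i].data = data;
		vmx->guest_uret_msrs[i].mask = mask;
		return true;
	}

With something along those lines, the tail of update_transition_efer() below could collapse to a single "return vmx_setup_uret_msr(vmx, MSR_EFER, guest_efer, ~ignore_bits);" after the clear_atomic_switch_msr() call.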
Diffstat (limited to 'arch/x86/kvm/vmx/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx/vmx.c	| 31	+++++++++++++++++++------------
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 068783a13ac8..1ad9faca44ef 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -941,10 +941,11 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 	m->host.val[j].value = host_val;
 }
 
-static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+static bool update_transition_efer(struct vcpu_vmx *vmx)
 {
 	u64 guest_efer = vmx->vcpu.arch.efer;
 	u64 ignore_bits = 0;
+	int i;
 
 	/* Shadow paging assumes NX to be available. */
 	if (!enable_ept)
@@ -976,17 +977,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 		else
 			clear_atomic_switch_msr(vmx, MSR_EFER);
 		return false;
-	} else {
-		clear_atomic_switch_msr(vmx, MSR_EFER);
+	}
 
-		guest_efer &= ~ignore_bits;
-		guest_efer |= host_efer & ignore_bits;
+	i = __vmx_find_uret_msr(vmx, MSR_EFER);
+	if (i < 0)
+		return false;
 
-		vmx->guest_uret_msrs[efer_offset].data = guest_efer;
-		vmx->guest_uret_msrs[efer_offset].mask = ~ignore_bits;
+	clear_atomic_switch_msr(vmx, MSR_EFER);
 
-		return true;
-	}
+	guest_efer &= ~ignore_bits;
+	guest_efer |= host_efer & ignore_bits;
+
+	vmx->guest_uret_msrs[i].data = guest_efer;
+	vmx->guest_uret_msrs[i].mask = ~ignore_bits;
+
+	return true;
 }
 
 #ifdef CONFIG_X86_32
@@ -1648,9 +1653,11 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 			move_msr_up(vmx, index, nr_active_uret_msrs++);
 	}
 #endif
-	index = __vmx_find_uret_msr(vmx, MSR_EFER);
-	if (index >= 0 && update_transition_efer(vmx, index))
-		move_msr_up(vmx, index, nr_active_uret_msrs++);
+	if (update_transition_efer(vmx)) {
+		index = __vmx_find_uret_msr(vmx, MSR_EFER);
+		if (index >= 0)
+			move_msr_up(vmx, index, nr_active_uret_msrs++);
+	}
 
 	if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) {
 		index = __vmx_find_uret_msr(vmx, MSR_TSC_AUX);
 		if (index >= 0)
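The same find-then-act pattern recurs in setup_msrs() for every uret MSR (MSR_EFER above, MSR_TSC_AUX here), which is the repetition a wrapper could eventually absorb. A hypothetical sketch, with the name and signature assumed rather than taken from this series:

	/*
	 * Hypothetical sketch (not from this patch): look up a uret MSR and,
	 * if the CPU has it, slot it into the active portion of the array.
	 */
	static void vmx_move_uret_msr_up(struct vcpu_vmx *vmx, u32 msr,
					 int *nr_active_uret_msrs)
	{
		int index = __vmx_find_uret_msr(vmx, msr);

		if (index >= 0)
			move_msr_up(vmx, index, (*nr_active_uret_msrs)++);
	}

Under that assumption, the EFER block in setup_msrs() would read: if (update_transition_efer(vmx)) vmx_move_uret_msr_up(vmx, MSR_EFER, &nr_active_uret_msrs);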