author		Sean Christopherson <sean.j.christopherson@intel.com>	2018-12-12 13:30:08 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2018-12-14 18:00:04 +0100
commit		f9b245e182ac9beb40f48faa0fd04ed3f3830d50 (patch)
tree		4703a8462585c7a4fea98912e8e5f6ee8b1db164 /arch/x86/kvm/vmx/nested.c
parent		461b4ba4c7ad79137171de2887e5d4d05a0ec8c1 (diff)
KVM: nVMX: Remove param indirection from nested_vmx_check_msr_switch()
Passing the enum and doing an indirect lookup is silly when we can
simply pass the field directly.

Remove the "fast path" code in nested_vmx_check_msr_switch_controls()
as it's now nothing more than a redundant check.

Remove the debug message rather than continue passing the enum for the
address field.  Having debug messages for the MSRs themselves is useful
as MSR legality is a huge space, whereas messing up a physical address
means the VMM is fundamentally broken.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx/nested.c')
-rw-r--r--	arch/x86/kvm/vmx/nested.c	34
1 file changed, 10 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 211e5ad96ae2..f594ddce7280 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -700,45 +700,31 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 }
 
 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
-				       unsigned long count_field,
-				       unsigned long addr_field)
+				       u32 count, u64 addr)
 {
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	int maxphyaddr;
-	u64 count, addr;
 
-	if (vmcs12_read_any(vmcs12, count_field, &count) ||
-	    vmcs12_read_any(vmcs12, addr_field, &addr)) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
 	if (count == 0)
 		return 0;
 	maxphyaddr = cpuid_maxphyaddr(vcpu);
 	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
-	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
-		pr_debug_ratelimited(
-			"nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
-			addr_field, maxphyaddr, count, addr);
+	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
 		return -EINVAL;
-	}
+
 	return 0;
 }
 
 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
 						struct vmcs12 *vmcs12)
 {
-	if (vmcs12->vm_exit_msr_load_count == 0 &&
-	    vmcs12->vm_exit_msr_store_count == 0 &&
-	    vmcs12->vm_entry_msr_load_count == 0)
-		return 0; /* Fast path */
-	if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
-					VM_EXIT_MSR_LOAD_ADDR) ||
-	    nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
-					VM_EXIT_MSR_STORE_ADDR) ||
-	    nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
-					VM_ENTRY_MSR_LOAD_ADDR))
+	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
+					vmcs12->vm_exit_msr_load_addr) ||
+	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
+					vmcs12->vm_exit_msr_store_addr) ||
+	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
+					vmcs12->vm_entry_msr_load_addr))
 		return -EINVAL;
+
 	return 0;
 }
 