author    Krish Sadhukhan <krish.sadhukhan@oracle.com>  2018-12-12 13:30:07 -0500
committer Paolo Bonzini <pbonzini@redhat.com>  2018-12-14 18:00:03 +0100
commit    461b4ba4c7ad79137171de2887e5d4d05a0ec8c1 (patch)
tree      f6581f5c93aa73110a3b4b18ff9bda405ad45157 /arch/x86/kvm/vmx/nested.c
parent    16322a3b5e7ce8250025361c092903a7259c1677 (diff)
KVM: nVMX: Move the checks for VM-Execution Control Fields to a separate helper function
.. to improve readability and maintainability, and to align the code as per
the layout of the checks in chapter "VM Entries" in Intel SDM vol 3C.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Mark Kanda <mark.kanda@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
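The shape of the refactoring is easy to see in isolation: the grouped checks move into a helper that reports failure with a plain -EINVAL, and the caller keeps translating any non-zero return into the architectural VMXERR_ENTRY_INVALID_CONTROL_FIELD it has always reported. The following is a simplified, self-contained C sketch of that call-site pattern only; the stub type, the placeholder check, and the numeric error value are illustrative stand-ins, not the kernel's definitions.

#include <errno.h>
#include <stdio.h>

#define VMXERR_ENTRY_INVALID_CONTROL_FIELD 7	/* stand-in value for illustration */

/* Minimal stand-in for the fields this sketch needs from struct vmcs12. */
struct vmcs12_stub {
	unsigned int virtual_processor_id;
};

/* Placeholder for one of the individual control checks (non-zero means failure). */
static int check_vpid_control(const struct vmcs12_stub *vmcs12)
{
	return vmcs12->virtual_processor_id == 0;
}

/* Grouped VM-execution control checks: 0 on success, -EINVAL on any failure. */
static int nested_check_vm_execution_controls_sketch(const struct vmcs12_stub *vmcs12)
{
	if (check_vpid_control(vmcs12))
		return -EINVAL;
	return 0;
}

/* The caller still reports the architectural error code, as before the patch. */
static int check_vmentry_prereqs_sketch(const struct vmcs12_stub *vmcs12)
{
	if (nested_check_vm_execution_controls_sketch(vmcs12))
		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
	return 0;
}

int main(void)
{
	struct vmcs12_stub good = { .virtual_processor_id = 1 };
	struct vmcs12_stub bad  = { .virtual_processor_id = 0 };

	printf("%d %d\n", check_vmentry_prereqs_sketch(&good),
	       check_vmentry_prereqs_sketch(&bad));	/* prints: 0 7 */
	return 0;
}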
Diffstat (limited to 'arch/x86/kvm/vmx/nested.c')
-rw-r--r--  arch/x86/kvm/vmx/nested.c  131
1 file changed, 62 insertions(+), 69 deletions(-)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index f1ba4871f4ac..211e5ad96ae2 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2444,94 +2444,90 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
return true;
}
-static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
+/*
+ * Checks related to VM-Execution Control Fields
+ */
+static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- bool ia32e;
-
- if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
- vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_apicv_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
- if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (!nested_cpu_has_preemption_timer(vmcs12) &&
- nested_cpu_has_save_preemption_timer(vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_pml_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
- vmx->nested.msrs.procbased_ctls_low,
- vmx->nested.msrs.procbased_ctls_high) ||
- (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
- !vmx_control_verify(vmcs12->secondary_vm_exec_control,
- vmx->nested.msrs.secondary_ctls_low,
- vmx->nested.msrs.secondary_ctls_high)) ||
- !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
+ if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
vmx->nested.msrs.pinbased_ctls_low,
vmx->nested.msrs.pinbased_ctls_high) ||
- !vmx_control_verify(vmcs12->vm_exit_controls,
- vmx->nested.msrs.exit_ctls_low,
- vmx->nested.msrs.exit_ctls_high) ||
- !vmx_control_verify(vmcs12->vm_entry_controls,
- vmx->nested.msrs.entry_ctls_low,
- vmx->nested.msrs.entry_ctls_high))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
+ vmx->nested.msrs.procbased_ctls_low,
+ vmx->nested.msrs.procbased_ctls_high))
+ return -EINVAL;
- if (nested_vmx_check_nmi_controls(vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
+ !vmx_control_verify(vmcs12->secondary_vm_exec_control,
+ vmx->nested.msrs.secondary_ctls_low,
+ vmx->nested.msrs.secondary_ctls_high))
+ return -EINVAL;
+
+ if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
+ nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
+ nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
+ nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
+ nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
+ nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
+ nested_vmx_check_nmi_controls(vmcs12) ||
+ nested_vmx_check_pml_controls(vcpu, vmcs12) ||
+ nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
+ nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
+ nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
+ (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
+ return -EINVAL;
+
+ if (nested_cpu_has_ept(vmcs12) &&
+ !valid_ept_address(vcpu, vmcs12->ept_pointer))
+ return -EINVAL;
if (nested_cpu_has_vmfunc(vmcs12)) {
if (vmcs12->vm_function_control &
~vmx->nested.msrs.vmfunc_controls)
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ return -EINVAL;
if (nested_cpu_has_eptp_switching(vmcs12)) {
if (!nested_cpu_has_ept(vmcs12) ||
!page_address_valid(vcpu, vmcs12->eptp_list_address))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ return -EINVAL;
}
}
- if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
+ return 0;
+}
+
+static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool ia32e;
+
+ if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
+ vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ if (nested_check_vm_execution_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
!nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
!nested_cr3_valid(vcpu, vmcs12->host_cr3))
return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+ if (!vmx_control_verify(vmcs12->vm_exit_controls,
+ vmx->nested.msrs.exit_ctls_low,
+ vmx->nested.msrs.exit_ctls_high) ||
+ !vmx_control_verify(vmcs12->vm_entry_controls,
+ vmx->nested.msrs.entry_ctls_low,
+ vmx->nested.msrs.entry_ctls_high))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
/*
* If the load IA32_EFER VM-exit control is 1, bits reserved in the
* IA32_EFER MSR must be 0 in the field for that register. In addition,
@@ -2603,10 +2599,6 @@ static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
}
}
- if (nested_cpu_has_ept(vmcs12) &&
- !valid_ept_address(vcpu, vmcs12->ept_pointer))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
return 0;
}
@@ -2638,7 +2630,8 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
}
static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12, u32 *exit_qual)
+ struct vmcs12 *vmcs12,
+ u32 *exit_qual)
{
bool ia32e;
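Most of the consolidated control checks funnel through vmx_control_verify() against the low/high capability pairs cached in vmx->nested.msrs (pinbased_ctls_low/high, procbased_ctls_low/high, and so on). Assuming the usual allowed-0/allowed-1 convention for the VMX capability MSRs described in the SDM, the test amounts to: every must-be-one bit is set and no bit outside the allowed-one mask is set. Below is a minimal standalone sketch of that predicate, not the kernel's actual implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * A control dword is acceptable if every must-be-one bit (the "low" word,
 * i.e. allowed-0 settings) is set and no bit outside the allowed-one mask
 * (the "high" word) is set.
 */
static bool control_verify_sketch(uint32_t control, uint32_t low, uint32_t high)
{
	return ((control & low) == low) && ((control & high) == control);
}

int main(void)
{
	/* Example capability pair: bit 1 must be 1, only bits 0-3 may be 1. */
	uint32_t low = 0x2, high = 0xf;

	printf("%d\n", control_verify_sketch(0x06, low, high));	/* 1: valid                  */
	printf("%d\n", control_verify_sketch(0x04, low, high));	/* 0: must-be-1 bit is clear */
	printf("%d\n", control_verify_sketch(0x12, low, high));	/* 0: disallowed bit is set  */
	return 0;
}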