Diffstat (limited to 'arch/x86/kvm/vmx')
-rw-r--r--	arch/x86/kvm/vmx/vmx.c	 4
-rw-r--r--	arch/x86/kvm/vmx/vmx.h	42
2 files changed, 14 insertions, 32 deletions
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8b55f43433a4..d843ef34aafd 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6083,7 +6083,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 		if (flexpriority_enabled) {
 			sec_exec_control |=
 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-			vmx_flush_tlb(vcpu, true);
+			vmx_flush_tlb(vcpu);
 		}
 		break;
 	case LAPIC_MODE_X2APIC:
@@ -6101,7 +6101,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
 {
 	if (!is_guest_mode(vcpu)) {
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
-		vmx_flush_tlb(vcpu, true);
+		vmx_flush_tlb(vcpu);
 	}
 }
 
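For reference, the second call site reads as follows with the hunk applied; this is simply the hunk's context and '+' lines reassembled (the inline comment is added here for clarity and is not part of the patch):

static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
{
	if (!is_guest_mode(vcpu)) {
		/* Not running a nested guest: repoint the APIC-access page and flush. */
		vmcs_write64(APIC_ACCESS_ADDR, hpa);
		vmx_flush_tlb(vcpu);
	}
}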
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 1560296dde25..45978552524e 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -500,46 +500,28 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
 
 u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
 
-static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
-				   bool invalidate_gpa)
-{
-	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
-		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
-			return;
-		ept_sync_context(construct_eptp(vcpu,
-						vcpu->arch.mmu->root_hpa));
-	} else {
-		vpid_sync_context(vpid);
-	}
-}
-
-static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	/*
-	 * Flush all EPTP/VPID contexts if the TLB flush _may_ have been
-	 * invoked via kvm_flush_remote_tlbs(), which always passes %true for
-	 * @invalidate_gpa.  Flushing remote TLBs requires all contexts to be
-	 * flushed, not just the active context.
+	 * Flush all EPTP/VPID contexts, as the TLB flush _may_ have been
+	 * invoked via kvm_flush_remote_tlbs().  Flushing remote TLBs requires
+	 * all contexts to be flushed, not just the active context.
 	 *
 	 * Note, this also ensures a deferred TLB flush with VPID enabled and
 	 * EPT disabled invalidates the "correct" VPID, by nuking both L1 and
 	 * L2's VPIDs.
 	 */
-	if (invalidate_gpa) {
-		if (enable_ept) {
-			ept_sync_global();
-		} else if (enable_vpid) {
-			if (cpu_has_vmx_invvpid_global()) {
-				vpid_sync_vcpu_global();
-			} else {
-				vpid_sync_vcpu_single(vmx->vpid);
-				vpid_sync_vcpu_single(vmx->nested.vpid02);
-			}
+	if (enable_ept) {
+		ept_sync_global();
+	} else if (enable_vpid) {
+		if (cpu_has_vmx_invvpid_global()) {
+			vpid_sync_vcpu_global();
+		} else {
+			vpid_sync_vcpu_single(vmx->vpid);
+			vpid_sync_vcpu_single(vmx->nested.vpid02);
 		}
-	} else {
-		__vmx_flush_tlb(vcpu, vmx->vpid, false);
 	}
 }
 
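Reassembled the same way from the hunk above, the resulting helper in vmx.h reads as follows (the block comment is the patch's own):

static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Flush all EPTP/VPID contexts, as the TLB flush _may_ have been
	 * invoked via kvm_flush_remote_tlbs().  Flushing remote TLBs requires
	 * all contexts to be flushed, not just the active context.
	 *
	 * Note, this also ensures a deferred TLB flush with VPID enabled and
	 * EPT disabled invalidates the "correct" VPID, by nuking both L1 and
	 * L2's VPIDs.
	 */
	if (enable_ept) {
		ept_sync_global();
	} else if (enable_vpid) {
		if (cpu_has_vmx_invvpid_global()) {
			vpid_sync_vcpu_global();
		} else {
			vpid_sync_vcpu_single(vmx->vpid);
			vpid_sync_vcpu_single(vmx->nested.vpid02);
		}
	}
}

With the invalidate_gpa parameter and the __vmx_flush_tlb() helper gone, every caller now gets the global EPT/VPID flush unconditionally, which is why the two vmx.c call sites above simply drop the boolean argument.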