From b3646477d458fbe7694a15b9c78fbe2fa426b703 Mon Sep 17 00:00:00 2001
From: Jason Baron
Date: Thu, 14 Jan 2021 22:27:56 -0500
Subject: KVM: x86: use static calls to reduce kvm_x86_ops overhead

Convert kvm_x86_ops to use static calls. Note that all kvm_x86_ops are
covered here except for 'pmu_ops' and 'nested_ops'.

Here are some numbers running cpuid in a loop of 1 million calls, averaged
over 5 runs, measured in the vm (lower is better).

Intel Xeon 3000MHz:

           |default    |mitigations=off
-------------------------------------
vanilla    |.671s      |.486s
static call|.573s(-15%)|.458s(-6%)

AMD EPYC 2500MHz:

           |default    |mitigations=off
-------------------------------------
vanilla    |.710s      |.609s
static call|.664s(-6%) |.609s(0%)

Cc: Paolo Bonzini
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Borislav Petkov
Cc: Peter Zijlstra
Cc: Andrea Arcangeli
Cc: Sean Christopherson
Signed-off-by: Jason Baron
Message-Id:
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/kvm_cache_regs.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/x86/kvm/kvm_cache_regs.h')

diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index a889563ad02d..2e11da2f5621 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -68,7 +68,7 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
 		return 0;
 
 	if (!kvm_register_is_available(vcpu, reg))
-		kvm_x86_ops.cache_reg(vcpu, reg);
+		static_call(kvm_x86_cache_reg)(vcpu, reg);
 
 	return vcpu->arch.regs[reg];
 }
@@ -108,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 	might_sleep();  /* on svm */
 
 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);
 
 	return vcpu->arch.walk_mmu->pdptrs[index];
 }
@@ -118,7 +118,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
 	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
 	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
 	return vcpu->arch.cr0 & mask;
 }
@@ -132,14 +132,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
 	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
 	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
 	return vcpu->arch.cr4 & mask;
 }
 
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
 	return vcpu->arch.cr3;
 }
-- 
cgit
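
For reference, below is a minimal sketch of the static_call() pattern the
hunks above rely on. The names (my_cache_reg, my_vendor_cache_reg,
my_ops_setup, my_register_read) are hypothetical stand-ins, not the actual
KVM macros; the real series generates one static-call key per kvm_x86_ops
member (e.g. kvm_x86_cache_reg) and updates it when the vendor module
registers its ops.

/*
 * Minimal sketch (hypothetical names, not the actual KVM code): one
 * static-call key per op, updated once at setup time, then invoked via a
 * patched direct call instead of an indirect branch through a
 * function-pointer table.
 */
#include <linux/static_call.h>

struct kvm_vcpu;

/* Hypothetical vendor callback (stands in for a VMX/SVM cache_reg). */
static void my_vendor_cache_reg(struct kvm_vcpu *vcpu, int reg)
{
	/* ... reload the register from hardware state into the cache ... */
}

/* One key per op; starts out NULL until the vendor module loads. */
DEFINE_STATIC_CALL_NULL(my_cache_reg, my_vendor_cache_reg);

/* At vendor setup, patch the call sites to point at the callback. */
static void my_ops_setup(void)
{
	static_call_update(my_cache_reg, my_vendor_cache_reg);
}

/* Call sites then use static_call(), mirroring the hunks above. */
static void my_register_read(struct kvm_vcpu *vcpu, int reg)
{
	static_call(my_cache_reg)(vcpu, reg);
}

With retpolines enabled, the indirect branch through kvm_x86_ops is the
expensive part, which is why the 'default' (mitigated) column in the
numbers above shows the larger improvement than 'mitigations=off'.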