-rw-r--r--  arch/x86/include/asm/kvm_host.h  |  5
-rw-r--r--  arch/x86/kvm/mmu/mmu.c           |  7
-rw-r--r--  arch/x86/kvm/mmu/mmu_internal.h  | 28
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h   |  1
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c       |  8
-rw-r--r--  arch/x86/kvm/x86.c               |  5
6 files changed, 42 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f164c6c1514a..c5fb4115176d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1269,7 +1269,12 @@ struct kvm_vm_stat {
struct kvm_vcpu_stat {
struct kvm_vcpu_stat_generic generic;
+ u64 pf_taken;
u64 pf_fixed;
+ u64 pf_emulate;
+ u64 pf_spurious;
+ u64 pf_fast;
+ u64 pf_mmio_spte_created;
u64 pf_guest;
u64 tlb_flush;
u64 invlpg;
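
Note for readers of this diff: the counters added above are registered with the generic stats machinery in the arch/x86/kvm/x86.c hunk below, so besides the binary stats file descriptor they should also surface as aggregate files under /sys/kernel/debug/kvm/ when debugfs is enabled. A minimal userspace sketch, not part of the patch, that polls those files; it assumes the debugfs file names mirror the kvm_vcpu_stat field names, as they do for the existing pf_fixed counter, and needs root:

/* Print the aggregate KVM page fault counters exposed under debugfs. */
#include <inttypes.h>
#include <stdio.h>

static int read_kvm_stat(const char *name, uint64_t *val)
{
	char path[128];
	FILE *f;
	int ok;

	snprintf(path, sizeof(path), "/sys/kernel/debug/kvm/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	ok = fscanf(f, "%" SCNu64, val) == 1;
	fclose(f);
	return ok ? 0 : -1;
}

int main(void)
{
	static const char * const stats[] = {
		"pf_taken", "pf_fixed", "pf_emulate",
		"pf_spurious", "pf_fast", "pf_mmio_spte_created",
	};
	uint64_t val;

	for (unsigned int i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		if (!read_kvm_stat(stats[i], &val))
			printf("%-22s %" PRIu64 "\n", stats[i], val);
	}
	return 0;
}
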
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f22cbb2aef53..59e02fb705c0 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2660,6 +2660,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
*sptep, write_fault, gfn);
if (unlikely(is_noslot_pfn(pfn))) {
+ vcpu->stat.pf_mmio_spte_created++;
mark_mmio_spte(vcpu, sptep, gfn, pte_access);
return RET_PF_EMULATE;
}
@@ -2980,7 +2981,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
return ret;
direct_pte_prefetch(vcpu, it.sptep);
- ++vcpu->stat.pf_fixed;
return ret;
}
@@ -3249,6 +3249,9 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
walk_shadow_page_lockless_end(vcpu);
+ if (ret != RET_PF_INVALID)
+ vcpu->stat.pf_fast++;
+
return ret;
}
@@ -5354,7 +5357,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
write_unlock(&vcpu->kvm->mmu_lock);
}
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
+int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len)
{
int r, emulation_type = EMULTYPE_PF;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 9caa747ee033..bd2a26897b97 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -248,11 +248,35 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
.req_level = PG_LEVEL_4K,
.goal_level = PG_LEVEL_4K,
};
+ int r;
+
+ /*
+ * Async #PF "faults", a.k.a. prefetch faults, are not faults from the
+ * guest perspective and have already been counted at the time of the
+ * original fault.
+ */
+ if (!prefetch)
+ vcpu->stat.pf_taken++;
if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
- return kvm_tdp_page_fault(vcpu, &fault);
+ r = kvm_tdp_page_fault(vcpu, &fault);
+ else
+ r = vcpu->arch.mmu->page_fault(vcpu, &fault);
- return vcpu->arch.mmu->page_fault(vcpu, &fault);
+ /*
+ * Similar to above, prefetch faults aren't truly spurious, and the
+ * async #PF path doesn't do emulation. Do count faults that are fixed
+ * by the async #PF handler though, otherwise they'll never be counted.
+ */
+ if (r == RET_PF_FIXED)
+ vcpu->stat.pf_fixed++;
+ else if (prefetch)
+ ;
+ else if (r == RET_PF_EMULATE)
+ vcpu->stat.pf_emulate++;
+ else if (r == RET_PF_SPURIOUS)
+ vcpu->stat.pf_spurious++;
+ return r;
}
int kvm_mmu_max_mapping_level(struct kvm *kvm,
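
For readability only, here is the accounting flow in kvm_mmu_do_page_fault() as it reads with the hunk above applied, reconstructed purely from this diff; the empty "else if (prefetch) ;" arm is folded into explicit !prefetch tests, the logic is unchanged:

	if (!prefetch)
		vcpu->stat.pf_taken++;

	if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	/* Count async #PF completions, but never as emulated or spurious. */
	if (r == RET_PF_FIXED)
		vcpu->stat.pf_fixed++;
	else if (!prefetch && r == RET_PF_EMULATE)
		vcpu->stat.pf_emulate++;
	else if (!prefetch && r == RET_PF_SPURIOUS)
		vcpu->stat.pf_spurious++;

	return r;
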
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 7f8f1c8dbed2..db80f7ccaa4e 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -723,7 +723,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
return ret;
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
- ++vcpu->stat.pf_fixed;
return ret;
out_gpte_changed:
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7fc4ead5ce0d..841feaa48be5 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1100,6 +1100,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
if (unlikely(is_mmio_spte(new_spte))) {
+ vcpu->stat.pf_mmio_spte_created++;
trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
new_spte);
ret = RET_PF_EMULATE;
@@ -1108,13 +1109,6 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
rcu_dereference(iter->sptep));
}
- /*
- * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
- * consistent with legacy MMU behavior.
- */
- if (ret != RET_PF_SPURIOUS)
- vcpu->stat.pf_fixed++;
-
return ret;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ac6007a476e5..85dfa414582d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -266,7 +266,12 @@ const struct kvm_stats_header kvm_vm_stats_header = {
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, pf_taken),
STATS_DESC_COUNTER(VCPU, pf_fixed),
+ STATS_DESC_COUNTER(VCPU, pf_emulate),
+ STATS_DESC_COUNTER(VCPU, pf_spurious),
+ STATS_DESC_COUNTER(VCPU, pf_fast),
+ STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
STATS_DESC_COUNTER(VCPU, pf_guest),
STATS_DESC_COUNTER(VCPU, tlb_flush),
STATS_DESC_COUNTER(VCPU, invlpg),
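
With the descriptors above in place, the new counters can also be pulled per vCPU through the binary stats interface that STATS_DESC_COUNTER() feeds. A rough sketch, not part of the patch: it assumes "vcpu_fd" is an existing KVM vCPU file descriptor and relies on KVM_GET_STATS_FD plus the kvm_stats_header/kvm_stats_desc layout from <linux/kvm.h>:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_vcpu_stat(int vcpu_fd, const char *name, uint64_t *val)
{
	struct kvm_stats_header hdr;
	struct kvm_stats_desc *desc = NULL;
	size_t desc_sz;
	int stats_fd, ret = -1;
	uint32_t i;

	stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
	if (stats_fd < 0)
		return -1;

	if (pread(stats_fd, &hdr, sizeof(hdr), 0) != sizeof(hdr))
		goto out;

	/* Each descriptor is followed by a name field of hdr.name_size bytes. */
	desc_sz = sizeof(*desc) + hdr.name_size;
	desc = malloc(desc_sz);
	if (!desc)
		goto out;

	for (i = 0; i < hdr.num_desc; i++) {
		if (pread(stats_fd, desc, desc_sz,
			  hdr.desc_offset + (off_t)i * desc_sz) != (ssize_t)desc_sz)
			break;
		if (strcmp(desc->name, name))
			continue;
		/* The counters added here are single u64 values (desc->size == 1). */
		if (pread(stats_fd, val, sizeof(*val),
			  hdr.data_offset + desc->offset) == sizeof(*val))
			ret = 0;
		break;
	}
out:
	free(desc);
	close(stats_fd);
	return ret;
}

Called as, e.g., read_vcpu_stat(vcpu_fd, "pf_taken", &val); the same walk works against a VM-level stats fd for the kvm_vm_stat counters.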