| author | Sean Christopherson <seanjc@google.com> | 2025-08-06 12:56:52 -0700 |
|---|---|---|
| committer | Sean Christopherson <seanjc@google.com> | 2025-09-18 12:58:13 -0700 |
| commit | 9bae7a086394128bd829c0c04f9f75600fdc1bc1 | |
| tree | f5451164495599e2026f521d10f3a87e81599e50 | |
| parent | 30c0267f15812114d7ab96a7dd45874742d736fe | |
KVM: x86/pmu: Move initialization of valid PMCs bitmask to common x86
Move all initialization of all_valid_pmc_idx to common code, as the logic
is 100% common to Intel and AMD, and KVM heavily relies on Intel and AMD
having the same semantics. E.g. even though AMD doesn't support fixed
counters, KVM can't use all_valid_pmc_idx[63:32] for other purposes.
Tested-by: Xudong Hao <xudong.hao@intel.com>
Link: https://lore.kernel.org/r/20250806195706.1650976-31-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
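
To make the resulting layout concrete, here is a minimal userspace sketch of the bitmap the common path now populates: an illustration, not kernel code. The bitmap_set64() helper and the sample counter counts are invented for the demo; hardcoding KVM_FIXED_PMC_BASE_IDX to 32 assumes its kernel definition as INTEL_PMC_MAX_GENERIC, which is what this patch relies on.

```c
/*
 * Userspace sketch of the all_valid_pmc_idx layout (not kernel code):
 * GP counters occupy bits [0, nr_gp), fixed counters occupy bits
 * starting at KVM_FIXED_PMC_BASE_IDX.
 */
#include <stdint.h>
#include <stdio.h>

#define KVM_FIXED_PMC_BASE_IDX 32	/* assumed == INTEL_PMC_MAX_GENERIC */

/* Stand-in for the kernel's bitmap_set(), limited to one 64-bit word. */
static void bitmap_set64(uint64_t *map, unsigned int start, unsigned int nbits)
{
	while (nbits--)
		*map |= 1ULL << start++;
}

int main(void)
{
	uint64_t all_valid_pmc_idx = 0;
	unsigned int nr_gp = 6, nr_fixed = 3;	/* hypothetical Intel PMU */

	/* The now-common logic; on AMD, nr_fixed is simply 0. */
	bitmap_set64(&all_valid_pmc_idx, 0, nr_gp);
	bitmap_set64(&all_valid_pmc_idx, KVM_FIXED_PMC_BASE_IDX, nr_fixed);

	/* Prints 0x70000003f: GP in bits [0,6), fixed in bits [32,35). */
	printf("valid PMC bitmap: %#llx\n",
	       (unsigned long long)all_valid_pmc_idx);
	return 0;
}
```

With nr_fixed == 0 on AMD, the second call is a no-op and bits [63:32] stay clear, which is exactly why the common code has to keep that range reserved for fixed counters rather than handing it out for other uses.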
| -rw-r--r-- | arch/x86/kvm/pmu.c | 4 |
| -rw-r--r-- | arch/x86/kvm/svm/pmu.c | 1 |
| -rw-r--r-- | arch/x86/kvm/vmx/pmu_intel.c | 5 |

3 files changed, 4 insertions(+), 6 deletions(-)
```diff
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 5205f0d9ced9..b7dc5bd981ba 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -888,6 +888,10 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 	 */
 	if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
 		pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
+
+	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
+	bitmap_set(pmu->all_valid_pmc_idx, KVM_FIXED_PMC_BASE_IDX,
+		   pmu->nr_arch_fixed_counters);
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 25ccd6a99838..bc062285fbf5 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -210,7 +210,6 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 	/* not applicable to AMD; but clean them to prevent any fall out */
 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
 	pmu->nr_arch_fixed_counters = 0;
-	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
 }
 
 static void amd_pmu_init(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 096f091980f0..edf0db8e53ad 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -579,11 +579,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
 	}
 
-	bitmap_set(pmu->all_valid_pmc_idx,
-		   0, pmu->nr_arch_gp_counters);
-	bitmap_set(pmu->all_valid_pmc_idx,
-		   INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
-
 	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
 	if (intel_pmu_lbr_is_compatible(vcpu) &&
 	    (perf_capabilities & PERF_CAP_LBR_FMT))
```
