author		Aaron Lewis <aaronlewis@google.com>	2023-04-07 16:32:49 -0700
committer	Sean Christopherson <seanjc@google.com>	2023-04-14 13:20:53 -0700
commit		33ef1411a36b47ae7ecdb919463b0d78576b3832 (patch)
tree		c92a1c8b26d6166ccef41a7b1166486e266f79f6 /tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
parent		dfdeda67ea2dac57d2d7506d65cfe5a0878ad285 (diff)
KVM: selftests: Add a common helper for the PMU event filter guest code
Split out the common parts of the Intel and AMD guest code in the PMU
event filter test into a helper function.  This is in preparation for
adding additional counters to the test.

No functional changes intended.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Link: https://lore.kernel.org/r/20230407233254.957013-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Diffstat (limited to 'tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c')
-rw-r--r--	tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c	29
1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 2feef25ba691..13eca9357252 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -100,6 +100,15 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
 	GUEST_SYNC(0);
 }
 
+static uint64_t run_and_measure_loop(uint32_t msr_base)
+{
+	uint64_t branches_retired = rdmsr(msr_base + 0);
+
+	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+
+	return rdmsr(msr_base + 0) - branches_retired;
+}
+
 static void intel_guest_code(void)
 {
 	check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
@@ -108,16 +117,15 @@ static void intel_guest_code(void)
 	GUEST_SYNC(1);
 
 	for (;;) {
-		uint64_t br0, br1;
+		uint64_t count;
 
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
-		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
-		br0 = rdmsr(MSR_IA32_PMC0);
-		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
-		br1 = rdmsr(MSR_IA32_PMC0);
-		GUEST_SYNC(br1 - br0);
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);
+
+		count = run_and_measure_loop(MSR_IA32_PMC0);
+		GUEST_SYNC(count);
 	}
 }
 
@@ -133,15 +141,14 @@ static void amd_guest_code(void)
 	GUEST_SYNC(1);
 
 	for (;;) {
-		uint64_t br0, br1;
+		uint64_t count;
 
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
-		br0 = rdmsr(MSR_K7_PERFCTR0);
-		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
-		br1 = rdmsr(MSR_K7_PERFCTR0);
-		GUEST_SYNC(br1 - br0);
+
+		count = run_and_measure_loop(MSR_K7_PERFCTR0);
+		GUEST_SYNC(count);
 	}
 }
 
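For reference, this is how the new helper reads once the patch is applied, annotated with explanatory comments.  The comments are editorial and not part of the patch; NUM_BRANCHES, rdmsr(), and wrmsr() come from the test and the KVM selftests library.

/*
 * run_and_measure_loop() as it lands after this patch.  The comments
 * below are annotations for this write-up, not part of the commit.
 */
static uint64_t run_and_measure_loop(uint32_t msr_base)
{
	/* Snapshot the counter, e.g. MSR_IA32_PMC0 or MSR_K7_PERFCTR0. */
	uint64_t branches_retired = rdmsr(msr_base + 0);

	/*
	 * Retire a known number of branches: "loop ." decrements ECX and
	 * branches back to itself until ECX reaches zero, so the LOOP
	 * instruction executes NUM_BRANCHES times.  The "+c" constraint
	 * pins the compound literal (int){NUM_BRANCHES} in ECX.
	 */
	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));

	/* Return the number of branch events the PMC saw across the loop. */
	return rdmsr(msr_base + 0) - branches_retired;
}

Taking the counter's base MSR as a parameter is what lets intel_guest_code() and amd_guest_code() share the measurement loop, which per the changelog is the groundwork for adding additional counters to the test.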