path: root/tools/testing/selftests/kvm/include/x86_64/pmu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2023, Tencent, Inc.
 */
#ifndef SELFTEST_KVM_PMU_H
#define SELFTEST_KVM_PMU_H

#include <stdint.h>

#include <linux/bits.h>

#define KVM_PMU_EVENT_FILTER_MAX_EVENTS			300

/*
 * Encode an eventsel+umask pair into event-select MSR format.  Note, this is
 * technically AMD's format, as Intel's format only supports 8 bits for the
 * event selector, i.e. doesn't use bits 35:32 for the selector.  But, OR-ing
 * in '0' is a nop and won't clobber the CMASK (bits 31:24).
 */
#define RAW_EVENT(eventsel, umask) ((((eventsel) & 0xf00UL) << 24) |	\
				    ((eventsel) & 0xff) |		\
				    (((umask) & 0xff) << 8))
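
/*
 * Usage sketch: RAW_EVENT(0xc0, 0x00) yields 0xc0, i.e. Intel's architectural
 * "instructions retired" encoding, while a hypothetical 12-bit AMD selector
 * of 0x1c0 with a umask of 0x00 yields 0x1000000c0, selector bits 11:8
 * landing in MSR bits 35:32.
 */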

/*
 * These are technically Intel's definitions, but except for CMASK (see above),
 * AMD's layout is compatible with Intel's.
 */
#define ARCH_PERFMON_EVENTSEL_EVENT		GENMASK_ULL(7, 0)
#define ARCH_PERFMON_EVENTSEL_UMASK		GENMASK_ULL(15, 8)
#define ARCH_PERFMON_EVENTSEL_USR		BIT_ULL(16)
#define ARCH_PERFMON_EVENTSEL_OS		BIT_ULL(17)
#define ARCH_PERFMON_EVENTSEL_EDGE		BIT_ULL(18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL	BIT_ULL(19)
#define ARCH_PERFMON_EVENTSEL_INT		BIT_ULL(20)
#define ARCH_PERFMON_EVENTSEL_ANY		BIT_ULL(21)
#define ARCH_PERFMON_EVENTSEL_ENABLE		BIT_ULL(22)
#define ARCH_PERFMON_EVENTSEL_INV		BIT_ULL(23)
#define ARCH_PERFMON_EVENTSEL_CMASK		GENMASK_ULL(31, 24)
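
/*
 * Illustrative sketch (not part of this header): program GP counter 0 to
 * count retired instructions in both CPL0 and CPL>0, assuming the wrmsr()
 * helper and the MSR_P6_EVNTSEL0 definition that the selftests pull in via
 * processor.h and msr-index.h (INTEL_ARCH_INSTRUCTIONS_RETIRED is defined
 * below):
 *
 *	wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 *			       ARCH_PERFMON_EVENTSEL_OS |
 *			       ARCH_PERFMON_EVENTSEL_USR |
 *			       INTEL_ARCH_INSTRUCTIONS_RETIRED);
 */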

/* RDPMC control flags, Intel only. */
#define INTEL_RDPMC_METRICS			BIT_ULL(29)
#define INTEL_RDPMC_FIXED			BIT_ULL(30)
#define INTEL_RDPMC_FAST			BIT_ULL(31)
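
/*
 * Illustrative sketch: RDPMC reads a fixed counter when bit 30 of the index
 * is set, e.g. to read fixed counter 0 (assumes the rdpmc() helper from the
 * selftests' processor.h):
 *
 *	uint64_t fixed0 = rdpmc(INTEL_RDPMC_FIXED | 0);
 */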

/* Fixed PMC controls, Intel only. */
#define FIXED_PMC_GLOBAL_CTRL_ENABLE(_idx)	BIT_ULL((32 + (_idx)))

#define FIXED_PMC_KERNEL			BIT_ULL(0)
#define FIXED_PMC_USER				BIT_ULL(1)
#define FIXED_PMC_ANYTHREAD			BIT_ULL(2)
#define FIXED_PMC_ENABLE_PMI			BIT_ULL(3)
#define FIXED_PMC_NR_BITS			4
#define FIXED_PMC_CTRL(_idx, _val)		((_val) << ((_idx) * FIXED_PMC_NR_BITS))
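
/*
 * Illustrative sketch: enable fixed counter 0 for kernel and user mode, then
 * set its enable bit in global control (assumes wrmsr() and the
 * MSR_CORE_PERF_* definitions the selftests get from msr-index.h):
 *
 *	wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL,
 *	      FIXED_PMC_CTRL(0, FIXED_PMC_KERNEL | FIXED_PMC_USER));
 *	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(0));
 */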

#define PMU_CAP_FW_WRITES			BIT_ULL(13)
#define PMU_CAP_LBR_FMT				0x3f
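
/*
 * Illustrative sketch: both fields live in IA32_PERF_CAPABILITIES (assumes
 * rdmsr() and MSR_IA32_PERF_CAPABILITIES from processor.h/msr-index.h):
 *
 *	uint64_t perf_caps = rdmsr(MSR_IA32_PERF_CAPABILITIES);
 *	bool has_fw_writes = perf_caps & PMU_CAP_FW_WRITES;
 *	uint8_t lbr_format = perf_caps & PMU_CAP_LBR_FMT;
 */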

#define	INTEL_ARCH_CPU_CYCLES			RAW_EVENT(0x3c, 0x00)
#define	INTEL_ARCH_INSTRUCTIONS_RETIRED		RAW_EVENT(0xc0, 0x00)
#define	INTEL_ARCH_REFERENCE_CYCLES		RAW_EVENT(0x3c, 0x01)
#define	INTEL_ARCH_LLC_REFERENCES		RAW_EVENT(0x2e, 0x4f)
#define	INTEL_ARCH_LLC_MISSES			RAW_EVENT(0x2e, 0x41)
#define	INTEL_ARCH_BRANCHES_RETIRED		RAW_EVENT(0xc4, 0x00)
#define	INTEL_ARCH_BRANCHES_MISPREDICTED	RAW_EVENT(0xc5, 0x00)
#define	INTEL_ARCH_TOPDOWN_SLOTS		RAW_EVENT(0xa4, 0x01)

#define	AMD_ZEN_CORE_CYCLES			RAW_EVENT(0x76, 0x00)
#define	AMD_ZEN_INSTRUCTIONS_RETIRED		RAW_EVENT(0xc0, 0x00)
#define	AMD_ZEN_BRANCHES_RETIRED		RAW_EVENT(0xc2, 0x00)
#define	AMD_ZEN_BRANCHES_MISPREDICTED		RAW_EVENT(0xc3, 0x00)

/*
 * Note!  The order, and thus the index, of the architectural events matters,
 * as support for each event is enumerated via CPUID (leaf 0xa, EBX) using
 * the index of the event.
 */
enum intel_pmu_architectural_events {
	INTEL_ARCH_CPU_CYCLES_INDEX,
	INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX,
	INTEL_ARCH_REFERENCE_CYCLES_INDEX,
	INTEL_ARCH_LLC_REFERENCES_INDEX,
	INTEL_ARCH_LLC_MISSES_INDEX,
	INTEL_ARCH_BRANCHES_RETIRED_INDEX,
	INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
	INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
	NR_INTEL_ARCH_EVENTS,
};

enum amd_pmu_zen_events {
	AMD_ZEN_CORE_CYCLES_INDEX,
	AMD_ZEN_INSTRUCTIONS_INDEX,
	AMD_ZEN_BRANCHES_INDEX,
	AMD_ZEN_BRANCH_MISSES_INDEX,
	NR_AMD_ZEN_EVENTS,
};

extern const uint64_t intel_pmu_arch_events[];
extern const uint64_t amd_pmu_zen_events[];
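
/*
 * Illustrative sketch: the arrays above map each index to its RAW_EVENT()
 * encoding, e.g. to look up the encoding for retired branches:
 *
 *	uint64_t branches = intel_pmu_arch_events[INTEL_ARCH_BRANCHES_RETIRED_INDEX];
 */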

#endif /* SELFTEST_KVM_PMU_H */