Diffstat (limited to 'arch/x86/kvm/vmx/pmu_intel.c')
-rw-r--r--  arch/x86/kvm/vmx/pmu_intel.c | 582
1 file changed, 317 insertions(+), 265 deletions(-)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 466d18fc0c5d..de1d9785c01f 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -8,147 +8,129 @@
* Avi Kivity <avi@redhat.com>
* Gleb Natapov <gleb@redhat.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
+#include <asm/msr.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"
+#include "tdx.h"
-#define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
+/*
+ * Perf's "BASE" is wildly misleading; architectural PMUs use bits 31:16 of ECX
+ * to encode the "type" of counter to read, i.e. this is not a "base". And to
+ * further confuse things, non-architectural PMUs use bit 31 as a flag for
+ * "fast" reads, whereas the "type" is an explicit value.
+ */
+#define INTEL_RDPMC_GP 0
+#define INTEL_RDPMC_FIXED INTEL_PMC_FIXED_RDPMC_BASE
-static struct kvm_event_hw_type_mapping intel_arch_events[] = {
- [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
- [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
- [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
- [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
- [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
- [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
- [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
- /* The above index must match CPUID 0x0A.EBX bit vector */
- [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
-};
+#define INTEL_RDPMC_TYPE_MASK GENMASK(31, 16)
+#define INTEL_RDPMC_INDEX_MASK GENMASK(15, 0)
-/* mapping between fixed pmc index and intel_arch_events array */
-static int fixed_pmc_events[] = {1, 0, 7};
+#define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
-static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+static struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
{
- int i;
-
- for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
- u8 new_ctrl = fixed_ctrl_field(data, i);
- u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
- struct kvm_pmc *pmc;
-
- pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
-
- if (old_ctrl == new_ctrl)
- continue;
-
- __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
- reprogram_fixed_counter(pmc, new_ctrl, i);
- }
+ if (is_td_vcpu(vcpu))
+ return NULL;
- pmu->fixed_ctr_ctrl = data;
+ return &to_vmx(vcpu)->lbr_desc;
}
-/* function is called when global control register has been updated. */
-static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+static struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
{
- int bit;
- u64 diff = pmu->global_ctrl ^ data;
-
- pmu->global_ctrl = data;
+ if (is_td_vcpu(vcpu))
+ return NULL;
- for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
- reprogram_counter(pmu, bit);
+ return &to_vmx(vcpu)->lbr_desc.records;
}
-static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
+#pragma GCC poison to_vmx
+
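
The poison pragma above turns any later textual use of to_vmx in this file into a hard compile error, forcing everything below it to reach LBR state through the NULL-checked vcpu_to_lbr_desc()/vcpu_to_lbr_records() helpers instead of blindly treating a (possibly TDX) vCPU as a VMX vCPU. A minimal standalone sketch of the mechanism, not taken from the patch:

/* Standalone sketch, not from the patch: how "#pragma GCC poison" works. */
#include <stdio.h>

static int to_vmx(int x) { return x * 2; }          /* stand-in helper       */
static int early_user(int x) { return to_vmx(x); }  /* used before poisoning */

#pragma GCC poison to_vmx

int main(void)
{
	/*
	 * Any textual use of "to_vmx" below this point is a hard error, e.g.
	 * "return to_vmx(3);" fails with: attempt to use poisoned "to_vmx".
	 */
	printf("%d\n", early_user(3));
	return 0;
}
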
+static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
- struct kvm_pmu *pmu = pmc_to_pmu(pmc);
- u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
- u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+ struct kvm_pmc *pmc;
+ u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
int i;
- for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
- if (intel_arch_events[i].eventsel != event_select ||
- intel_arch_events[i].unit_mask != unit_mask)
- continue;
-
- /* disable event that reported as not present by cpuid */
- if ((i < 7) && !(pmu->available_event_types & (1 << i)))
- return PERF_COUNT_HW_MAX + 1;
-
- break;
- }
-
- if (i == ARRAY_SIZE(intel_arch_events))
- return PERF_COUNT_HW_MAX;
-
- return intel_arch_events[i].event_type;
-}
-
-/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
-static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
-{
- struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+ pmu->fixed_ctr_ctrl = data;
+ for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+ u8 new_ctrl = fixed_ctrl_field(data, i);
+ u8 old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);
- return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
-}
+ if (old_ctrl == new_ctrl)
+ continue;
-static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
-{
- if (pmc_idx < INTEL_PMC_IDX_FIXED)
- return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
- MSR_P6_EVNTSEL0);
- else {
- u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
+ pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
- return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
+ __set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use);
+ kvm_pmu_request_counter_reprogram(pmc);
}
}
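
reprogram_fixed_counters() now snapshots the old control value before overwriting pmu->fixed_ctr_ctrl, then walks the per-counter 4-bit fields to find which fixed counters actually changed. A standalone sketch of that nibble comparison, assuming the architectural 4-bit layout that fixed_ctrl_field() encodes:

/* Standalone sketch: comparing the 4-bit control fields of FIXED_CTR_CTRL. */
#include <stdint.h>
#include <stdio.h>

static uint8_t fixed_ctrl_field(uint64_t ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;	/* same shift/mask as KVM's helper */
}

int main(void)
{
	uint64_t old_ctrl = 0x0b0;	/* fixed 1: OS+USR+PMI, others disabled */
	uint64_t new_ctrl = 0x0b3;	/* fixed 0 now enabled for OS+USR       */
	int nr_arch_fixed_counters = 3;

	for (int i = 0; i < nr_arch_fixed_counters; i++) {
		if (fixed_ctrl_field(old_ctrl, i) != fixed_ctrl_field(new_ctrl, i))
			printf("fixed counter %d needs reprogramming\n", i);
	}
	return 0;
}
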
-static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
-{
- struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- bool fixed = idx & (1u << 30);
-
- idx &= ~(3u << 30);
-
- return fixed ? idx < pmu->nr_arch_fixed_counters
- : idx < pmu->nr_arch_gp_counters;
-}
-
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask)
{
+ unsigned int type = idx & INTEL_RDPMC_TYPE_MASK;
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- bool fixed = idx & (1u << 30);
struct kvm_pmc *counters;
unsigned int num_counters;
+ u64 bitmask;
- idx &= ~(3u << 30);
- if (fixed) {
+ /*
+ * The encoding of ECX for RDPMC is different for architectural versus
+ * non-architectural PMUs (PMUs with version '0'). For architectural
+ * PMUs, bits 31:16 specify the PMC type and bits 15:0 specify the PMC
+ * index. For non-architectural PMUs, bit 31 is a "fast" flag, and
+ * bits 30:0 specify the PMC index.
+ *
+ * Yell and reject attempts to read PMCs for a non-architectural PMU,
+ * as KVM doesn't support such PMUs.
+ */
+ if (WARN_ON_ONCE(!pmu->version))
+ return NULL;
+
+ /*
+ * General Purpose (GP) PMCs are supported on all PMUs, and fixed PMCs
+ * are supported on all architectural PMUs, i.e. on all virtual PMUs
+ * supported by KVM. Note, KVM only emulates fixed PMCs for PMU v2+,
+ * but the type itself is still valid, i.e. let RDPMC fail due to
+ * accessing a non-existent counter. Reject attempts to read all other
+ * types, which are unknown/unsupported.
+ */
+ switch (type) {
+ case INTEL_RDPMC_FIXED:
counters = pmu->fixed_counters;
num_counters = pmu->nr_arch_fixed_counters;
- } else {
+ bitmask = pmu->counter_bitmask[KVM_PMC_FIXED];
+ break;
+ case INTEL_RDPMC_GP:
counters = pmu->gp_counters;
num_counters = pmu->nr_arch_gp_counters;
+ bitmask = pmu->counter_bitmask[KVM_PMC_GP];
+ break;
+ default:
+ return NULL;
}
+
+ idx &= INTEL_RDPMC_INDEX_MASK;
if (idx >= num_counters)
return NULL;
- *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
+
+ *mask &= bitmask;
return &counters[array_index_nospec(idx, num_counters)];
}
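
For reference, a standalone sketch of the ECX decode performed above for an architectural PMU: bits 31:16 carry the counter type and bits 15:0 the index, with the fixed-counter type being bit 30 (perf's INTEL_PMC_FIXED_RDPMC_BASE). The constants below are illustrative rather than copied from the headers:

/* Standalone sketch of the architectural RDPMC ECX encoding. */
#include <stdint.h>
#include <stdio.h>

#define RDPMC_TYPE_MASK		0xffff0000u	/* bits 31:16 */
#define RDPMC_INDEX_MASK	0x0000ffffu	/* bits 15:0  */
#define RDPMC_TYPE_GP		0x0u
#define RDPMC_TYPE_FIXED	(1u << 30)	/* illustrative value */

int main(void)
{
	uint32_t ecx = RDPMC_TYPE_FIXED | 1;	/* "read fixed counter 1" */
	uint32_t type = ecx & RDPMC_TYPE_MASK;
	uint32_t index = ecx & RDPMC_INDEX_MASK;

	if (type == RDPMC_TYPE_FIXED)
		printf("fixed counter %u\n", index);
	else if (type == RDPMC_TYPE_GP)
		printf("GP counter %u\n", index);
	else
		printf("unsupported type %#x -> inject #GP\n", type >> 16);
	return 0;
}
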
static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
- if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PDCM))
return 0;
return vcpu->arch.perf_capabilities;
@@ -156,7 +138,7 @@ static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
- return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
+ return (vcpu_get_perf_capabilities(vcpu) & PERF_CAP_FW_WRITES) != 0;
}
static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
@@ -167,21 +149,20 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}
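
vcpu_get_perf_capabilities(), fw_writes_is_enabled() above and the PEBS validity checks below all key off individual IA32_PERF_CAPABILITIES bits. A standalone sketch using the SDM bit positions (LBR format in bits 5:0, PEBS record format in bits 11:8, full-width writes in bit 13, PEBS baseline in bit 14); treat the exact masks here as assumptions, not the kernel's definitions:

/* Standalone sketch: decoding IA32_PERF_CAPABILITIES bits (assumed layout). */
#include <stdint.h>
#include <stdio.h>

#define CAP_LBR_FMT		0x3full		/* bits 5:0   */
#define CAP_PEBS_FORMAT		0xf00ull	/* bits 11:8  */
#define CAP_FW_WRITES		(1ull << 13)
#define CAP_PEBS_BASELINE	(1ull << 14)

int main(void)
{
	uint64_t caps = CAP_FW_WRITES | CAP_PEBS_BASELINE | (4ull << 8) | 0x5;

	printf("full-width writes: %s\n", (caps & CAP_FW_WRITES) ? "yes" : "no");
	printf("LBR format:        %#llx\n",
	       (unsigned long long)(caps & CAP_LBR_FMT));
	printf("PEBS format:       %llu\n",
	       (unsigned long long)((caps & CAP_PEBS_FORMAT) >> 8));
	printf("PEBS baseline:     %s\n",
	       (caps & CAP_PEBS_BASELINE) ? "yes" : "no");
	return 0;
}
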
-bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
+static bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
{
- /*
- * As a first step, a guest could only enable LBR feature if its
- * cpu model is the same as the host because the LBR registers
- * would be pass-through to the guest and they're model specific.
- */
- return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
+ if (is_td_vcpu(vcpu))
+ return false;
+
+ return cpuid_model_is_consistent(vcpu);
}
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
{
- struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
+ if (is_td_vcpu(vcpu))
+ return false;
- return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
+ return !!vcpu_to_lbr_records(vcpu)->nr;
}
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
@@ -205,14 +186,22 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ u64 perf_capabilities;
int ret;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
- case MSR_CORE_PERF_GLOBAL_STATUS:
- case MSR_CORE_PERF_GLOBAL_CTRL:
- case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- ret = pmu->version > 1;
+ return kvm_pmu_has_perf_global_ctrl(pmu);
+ case MSR_IA32_PEBS_ENABLE:
+ ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
+ break;
+ case MSR_IA32_DS_AREA:
+ ret = guest_cpu_cap_has(vcpu, X86_FEATURE_DS);
+ break;
+ case MSR_PEBS_DATA_CFG:
+ perf_capabilities = vcpu_get_perf_capabilities(vcpu);
+ ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
+ ((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
break;
default:
ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
@@ -241,6 +230,9 @@ static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+ if (!lbr_desc)
+ return;
+
if (lbr_desc->event) {
perf_event_release_kernel(lbr_desc->event);
lbr_desc->event = NULL;
@@ -282,6 +274,9 @@ int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
PERF_SAMPLE_BRANCH_USER,
};
+ if (WARN_ON_ONCE(!lbr_desc))
+ return 0;
+
if (unlikely(lbr_desc->event)) {
__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
return 0;
@@ -326,9 +321,9 @@ static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
local_irq_disable();
if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
if (read)
- rdmsrl(index, msr_info->data);
+ rdmsrq(index, msr_info->data);
else
- wrmsrl(index, msr_info->data);
+ wrmsrq(index, msr_info->data);
__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
local_irq_enable();
return true;
@@ -351,36 +346,38 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
msr_info->data = pmu->fixed_ctr_ctrl;
- return 0;
- case MSR_CORE_PERF_GLOBAL_STATUS:
- msr_info->data = pmu->global_status;
- return 0;
- case MSR_CORE_PERF_GLOBAL_CTRL:
- msr_info->data = pmu->global_ctrl;
- return 0;
- case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- msr_info->data = 0;
- return 0;
+ break;
+ case MSR_IA32_PEBS_ENABLE:
+ msr_info->data = pmu->pebs_enable;
+ break;
+ case MSR_IA32_DS_AREA:
+ msr_info->data = pmu->ds_area;
+ break;
+ case MSR_PEBS_DATA_CFG:
+ msr_info->data = pmu->pebs_data_cfg;
+ break;
default:
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
(pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
u64 val = pmc_read_counter(pmc);
msr_info->data =
val & pmu->counter_bitmask[KVM_PMC_GP];
- return 0;
+ break;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
u64 val = pmc_read_counter(pmc);
msr_info->data =
val & pmu->counter_bitmask[KVM_PMC_FIXED];
- return 0;
+ break;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
msr_info->data = pmc->eventsel;
- return 0;
- } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
- return 0;
+ break;
+ } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
+ break;
+ }
+ return 1;
}
- return 1;
+ return 0;
}
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -389,36 +386,37 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
struct kvm_pmc *pmc;
u32 msr = msr_info->index;
u64 data = msr_info->data;
+ u64 reserved_bits, diff;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
- if (pmu->fixed_ctr_ctrl == data)
- return 0;
- if (!(data & 0xfffffffffffff444ull)) {
+ if (data & pmu->fixed_ctr_ctrl_rsvd)
+ return 1;
+
+ if (pmu->fixed_ctr_ctrl != data)
reprogram_fixed_counters(pmu, data);
- return 0;
- }
break;
- case MSR_CORE_PERF_GLOBAL_STATUS:
- if (msr_info->host_initiated) {
- pmu->global_status = data;
- return 0;
- }
- break; /* RO MSR */
- case MSR_CORE_PERF_GLOBAL_CTRL:
- if (pmu->global_ctrl == data)
- return 0;
- if (kvm_valid_perf_global_ctrl(pmu, data)) {
- global_ctrl_changed(pmu, data);
- return 0;
+ case MSR_IA32_PEBS_ENABLE:
+ if (data & pmu->pebs_enable_rsvd)
+ return 1;
+
+ if (pmu->pebs_enable != data) {
+ diff = pmu->pebs_enable ^ data;
+ pmu->pebs_enable = data;
+ reprogram_counters(pmu, diff);
}
break;
- case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- if (!(data & pmu->global_ovf_ctrl_mask)) {
- if (!msr_info->host_initiated)
- pmu->global_status &= ~data;
- return 0;
- }
+ case MSR_IA32_DS_AREA:
+ if (is_noncanonical_msr_address(data, vcpu))
+ return 1;
+
+ pmu->ds_area = data;
+ break;
+ case MSR_PEBS_DATA_CFG:
+ if (data & pmu->pebs_data_cfg_rsvd)
+ return 1;
+
+ pmu->pebs_data_cfg = data;
break;
default:
if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
@@ -426,69 +424,107 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
(data & ~pmu->counter_bitmask[KVM_PMC_GP]))
return 1;
+
if (!msr_info->host_initiated &&
!(msr & MSR_PMC_FULL_WIDTH_BIT))
data = (s64)(s32)data;
- pmc->counter += data - pmc_read_counter(pmc);
- if (pmc->perf_event && !pmc->is_paused)
- perf_event_period(pmc->perf_event,
- get_sample_period(pmc, data));
- return 0;
+ pmc_write_counter(pmc, data);
+ break;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
- pmc->counter += data - pmc_read_counter(pmc);
- if (pmc->perf_event && !pmc->is_paused)
- perf_event_period(pmc->perf_event,
- get_sample_period(pmc, data));
- return 0;
+ pmc_write_counter(pmc, data);
+ break;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
- if (data == pmc->eventsel)
- return 0;
- if (!(data & pmu->reserved_bits)) {
- reprogram_gp_counter(pmc, data);
- return 0;
+ reserved_bits = pmu->reserved_bits;
+ if ((pmc->idx == 2) &&
+ (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
+ reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
+ if (data & reserved_bits)
+ return 1;
+
+ if (data != pmc->eventsel) {
+ pmc->eventsel = data;
+ kvm_pmu_request_counter_reprogram(pmc);
}
- } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
- return 0;
+ break;
+ } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
+ break;
+ }
+ /* Not a known PMU MSR. */
+ return 1;
}
- return 1;
+ return 0;
}
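
One subtlety in the counter-write path above: a guest write through a legacy IA32_PERFCTRx MSR is sign-extended from bit 31 before being handed to pmc_write_counter(), whereas full-width IA32_PMCx aliases and host-initiated writes keep the 64-bit value as-is. A standalone sketch of the difference:

/* Standalone sketch: legacy vs. full-width counter MSR writes. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t data = 0xffffff00;	/* guest writes "-256" to IA32_PERFCTR0 */

	/* Legacy counter MSRs take 32 bits, sign-extended into the counter. */
	uint64_t legacy = (uint64_t)(int64_t)(int32_t)data;
	/* Full-width IA32_PMCx aliases (and host writes) keep all 64 bits.  */
	uint64_t full_width = data;

	printf("legacy write     -> %#llx\n", (unsigned long long)legacy);
	printf("full-width write -> %#llx\n", (unsigned long long)full_width);
	return 0;
}
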
-static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
+/*
+ * Map fixed counter events to architectural general purpose event encodings.
+ * Perf doesn't provide APIs to allow KVM to directly program a fixed counter,
+ * and so KVM instead programs the architectural event to effectively request
+ * the fixed counter. Perf isn't guaranteed to use a fixed counter and may
+ * instead program the encoding into a general purpose counter, e.g. if a
+ * different perf_event is already utilizing the requested counter, but the end
+ * result is the same (ignoring the fact that using a general purpose counter
+ * will likely exacerbate counter contention).
+ *
+ * Forcibly inlined to allow asserting on @index at build time, and there should
+ * never be more than one user.
+ */
+static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
+{
+ const enum perf_hw_id fixed_pmc_perf_ids[] = {
+ [0] = PERF_COUNT_HW_INSTRUCTIONS,
+ [1] = PERF_COUNT_HW_CPU_CYCLES,
+ [2] = PERF_COUNT_HW_REF_CPU_CYCLES,
+ };
+ u64 eventsel;
+
+ BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_MAX_NR_INTEL_FIXED_COUNTERS);
+ BUILD_BUG_ON(index >= KVM_MAX_NR_INTEL_FIXED_COUNTERS);
+
+ /*
+ * Yell if perf reports support for a fixed counter but perf doesn't
+ * have a known encoding for the associated general purpose event.
+ */
+ eventsel = perf_get_hw_event_config(fixed_pmc_perf_ids[index]);
+ WARN_ON_ONCE(!eventsel && index < kvm_pmu_cap.num_counters_fixed);
+ return eventsel;
+}
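
Concretely, using the encodings from the removed intel_arch_events[] table above, the three fixed counters end up requesting instructions retired (0xc0/0x00), unhalted core cycles (0x3c/0x00), and the reference-cycles pseudo-encoding (0x00/0x03). A standalone sketch of the eventsel values this produces; the real code obtains them from perf_get_hw_event_config():

/* Standalone sketch: eventsel values the fixed counters end up requesting. */
#include <stdint.h>
#include <stdio.h>

struct hw_event { uint8_t eventsel; uint8_t unit_mask; };

static const struct hw_event fixed_pmc_event[] = {
	[0] = { 0xc0, 0x00 },	/* PERF_COUNT_HW_INSTRUCTIONS   */
	[1] = { 0x3c, 0x00 },	/* PERF_COUNT_HW_CPU_CYCLES     */
	[2] = { 0x00, 0x03 },	/* PERF_COUNT_HW_REF_CPU_CYCLES */
};

int main(void)
{
	for (int i = 0; i < 3; i++) {
		uint64_t eventsel = ((uint64_t)fixed_pmc_event[i].unit_mask << 8) |
				    fixed_pmc_event[i].eventsel;

		printf("fixed counter %d -> eventsel %#06llx\n",
		       i, (unsigned long long)eventsel);
	}
	return 0;
}
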
+
+static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
{
- size_t size = ARRAY_SIZE(fixed_pmc_events);
- struct kvm_pmc *pmc;
- u32 event;
int i;
- for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
- pmc = &pmu->fixed_counters[i];
- event = fixed_pmc_events[array_index_nospec(i, size)];
- pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
- intel_arch_events[event].eventsel;
- }
+ for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+ pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
}
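
intel_pmu_enable_fixed_counter_bits() whitelists control bits by clearing them from fixed_ctr_ctrl_rsvd for every advertised fixed counter; intel_fixed_bits_by_idx() simply shifts the per-counter bits into that counter's 4-bit field. A standalone sketch, assuming the 4-bit stride:

/* Standalone sketch: whitelisting fixed-counter control bits, 4-bit stride. */
#include <stdint.h>
#include <stdio.h>

#define FIXED_KERNEL		(1ull << 0)	/* count in ring 0       */
#define FIXED_USER		(1ull << 1)	/* count in ring 3       */
#define FIXED_ENABLE_PMI	(1ull << 3)	/* raise PMI on overflow */
#define fixed_bits_by_idx(idx, bits)	((bits) << ((idx) * 4))

int main(void)
{
	uint64_t fixed_ctr_ctrl_rsvd = ~0ull;	/* start fully reserved */
	uint64_t bits = FIXED_KERNEL | FIXED_USER | FIXED_ENABLE_PMI;
	int nr_arch_fixed_counters = 3;

	for (int i = 0; i < nr_arch_fixed_counters; i++)
		fixed_ctr_ctrl_rsvd &= ~fixed_bits_by_idx(i, bits);

	printf("fixed_ctr_ctrl_rsvd = %#018llx\n",
	       (unsigned long long)fixed_ctr_ctrl_rsvd);
	return 0;
}

For three fixed counters this reproduces the 0xfffffffffffff444ull constant that the removed MSR_CORE_PERF_FIXED_CTR_CTRL check hard-coded earlier in this diff.
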
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
-
- struct x86_pmu_capability x86_pmu;
struct kvm_cpuid_entry2 *entry;
union cpuid10_eax eax;
union cpuid10_edx edx;
+ u64 perf_capabilities;
+ u64 counter_rsvd;
+
+ if (!lbr_desc)
+ return;
- pmu->nr_arch_gp_counters = 0;
- pmu->nr_arch_fixed_counters = 0;
- pmu->counter_bitmask[KVM_PMC_GP] = 0;
- pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
- pmu->version = 0;
- pmu->reserved_bits = 0xffffffff00200000ull;
+ memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
- entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
- if (!entry || !enable_pmu)
+ /*
+ * Setting passthrough of LBR MSRs is done only in the VM-Entry loop,
+ * and PMU refresh is disallowed after the vCPU has run, i.e. this code
+ * should never be reached while KVM is passing through MSRs.
+ */
+ if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm))
return;
+
+ entry = kvm_find_cpuid_entry(vcpu, 0xa);
+ if (!entry)
+ return;
+
eax.full = entry->eax;
edx.full = entry->edx;
@@ -496,60 +532,72 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
if (!pmu->version)
return;
- perf_get_x86_pmu_capability(&x86_pmu);
-
pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
- x86_pmu.num_counters_gp);
- eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
- pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
- eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
- pmu->available_event_types = ~entry->ebx &
- ((1ull << eax.split.mask_length) - 1);
-
- if (pmu->version == 1) {
- pmu->nr_arch_fixed_counters = 0;
- } else {
- pmu->nr_arch_fixed_counters =
- min3(ARRAY_SIZE(fixed_pmc_events),
- (size_t) edx.split.num_counters_fixed,
- (size_t) x86_pmu.num_counters_fixed);
- edx.split.bit_width_fixed = min_t(int,
- edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
- pmu->counter_bitmask[KVM_PMC_FIXED] =
- ((u64)1 << edx.split.bit_width_fixed) - 1;
- setup_fixed_pmc_eventsel(pmu);
- }
-
- pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
- (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
- pmu->global_ctrl_mask = ~pmu->global_ctrl;
- pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
- & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
- MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
- if (vmx_pt_mode_is_host_guest())
- pmu->global_ovf_ctrl_mask &=
- ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
-
- entry = kvm_find_cpuid_entry(vcpu, 7, 0);
+ kvm_pmu_cap.num_counters_gp);
+ eax.split.bit_width = min_t(int, eax.split.bit_width,
+ kvm_pmu_cap.bit_width_gp);
+ pmu->counter_bitmask[KVM_PMC_GP] = BIT_ULL(eax.split.bit_width) - 1;
+ eax.split.mask_length = min_t(int, eax.split.mask_length,
+ kvm_pmu_cap.events_mask_len);
+ pmu->available_event_types = ~entry->ebx & (BIT_ULL(eax.split.mask_length) - 1);
+
+ entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
if (entry &&
(boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
- (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
- pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
-
- bitmap_set(pmu->all_valid_pmc_idx,
- 0, pmu->nr_arch_gp_counters);
- bitmap_set(pmu->all_valid_pmc_idx,
- INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
-
- nested_vmx_pmu_entry_exit_ctls_update(vcpu);
+ (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
+ pmu->reserved_bits ^= HSW_IN_TX;
+ pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
+ }
- if (intel_pmu_lbr_is_compatible(vcpu))
- x86_perf_get_lbr(&lbr_desc->records);
+ perf_capabilities = vcpu_get_perf_capabilities(vcpu);
+ if (intel_pmu_lbr_is_compatible(vcpu) &&
+ (perf_capabilities & PERF_CAP_LBR_FMT))
+ memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
else
lbr_desc->records.nr = 0;
if (lbr_desc->records.nr)
bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
+
+ if (pmu->version == 1)
+ return;
+
+ pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
+ kvm_pmu_cap.num_counters_fixed);
+ edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
+ kvm_pmu_cap.bit_width_fixed);
+ pmu->counter_bitmask[KVM_PMC_FIXED] = BIT_ULL(edx.split.bit_width_fixed) - 1;
+
+ intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
+ INTEL_FIXED_0_USER |
+ INTEL_FIXED_0_ENABLE_PMI);
+
+ counter_rsvd = ~((BIT_ULL(pmu->nr_arch_gp_counters) - 1) |
+ ((BIT_ULL(pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
+ pmu->global_ctrl_rsvd = counter_rsvd;
+
+ /*
+ * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
+ * share reserved bit definitions. The kernel just happens to use
+ * OVF_CTRL for the names.
+ */
+ pmu->global_status_rsvd = pmu->global_ctrl_rsvd
+ & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
+ MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
+ if (vmx_pt_mode_is_host_guest())
+ pmu->global_status_rsvd &=
+ ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
+
+ if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
+ if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
+ pmu->pebs_enable_rsvd = counter_rsvd;
+ pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
+ pmu->pebs_data_cfg_rsvd = ~0xff00000full;
+ intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
+ } else {
+ pmu->pebs_enable_rsvd = ~(BIT_ULL(pmu->nr_arch_gp_counters) - 1);
+ }
+ }
}
static void intel_pmu_init(struct kvm_vcpu *vcpu)
@@ -558,21 +606,24 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
- for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
+ if (!lbr_desc)
+ return;
+
+ for (i = 0; i < KVM_MAX_NR_INTEL_GP_COUNTERS; i++) {
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
pmu->gp_counters[i].idx = i;
pmu->gp_counters[i].current_config = 0;
}
- for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
+ for (i = 0; i < KVM_MAX_NR_INTEL_FIXED_COUNTERS; i++) {
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu->fixed_counters[i].vcpu = vcpu;
- pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
+ pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
pmu->fixed_counters[i].current_config = 0;
+ pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);
}
- vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
lbr_desc->records.nr = 0;
lbr_desc->event = NULL;
lbr_desc->msr_passthrough = false;
@@ -580,26 +631,6 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- struct kvm_pmc *pmc = NULL;
- int i;
-
- for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
- pmc = &pmu->gp_counters[i];
-
- pmc_stop_counter(pmc);
- pmc->counter = pmc->eventsel = 0;
- }
-
- for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
- pmc = &pmu->fixed_counters[i];
-
- pmc_stop_counter(pmc);
- pmc->counter = 0;
- }
-
- pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
-
intel_pmu_release_guest_lbr_event(vcpu);
}
@@ -613,11 +644,11 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
*/
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
- u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ u64 data = vmx_guest_debugctl_read();
if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
data &= ~DEBUGCTLMSR_LBR;
- vmcs_write64(GUEST_IA32_DEBUGCTL, data);
+ vmx_guest_debugctl_write(vcpu, data);
}
}
@@ -685,9 +716,12 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+ if (WARN_ON_ONCE(!lbr_desc))
+ return;
+
if (!lbr_desc->event) {
vmx_disable_lbr_msrs_passthrough(vcpu);
- if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
+ if (vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR)
goto warn;
if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
goto warn;
@@ -704,23 +738,38 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
return;
warn:
- pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
- vcpu->vcpu_id);
+ pr_warn_ratelimited("vcpu-%d: fail to passthrough LBR.\n", vcpu->vcpu_id);
}
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
- if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+ if (!(vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR))
intel_pmu_release_guest_lbr_event(vcpu);
}
-struct kvm_pmu_ops intel_pmu_ops = {
- .pmc_perf_hw_id = intel_pmc_perf_hw_id,
- .pmc_is_enabled = intel_pmc_is_enabled,
- .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
+void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
+{
+ struct kvm_pmc *pmc = NULL;
+ int bit, hw_idx;
+
+ kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
+ if (!pmc_is_locally_enabled(pmc) ||
+ !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
+ continue;
+
+ /*
+ * A negative index indicates the event isn't mapped to a
+ * physical counter in the host, e.g. due to contention.
+ */
+ hw_idx = pmc->perf_event->hw.idx;
+ if (hw_idx != pmc->idx && hw_idx > -1)
+ pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
+ }
+}
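
intel_pmu_cross_mapped_check() records which host counters back guest counters of a different index, since perf is free to schedule the requested event on any compatible physical counter. A standalone sketch of the same bookkeeping with made-up indices:

/* Standalone sketch: detecting guest counters backed by a different host PMC. */
#include <stdio.h>

int main(void)
{
	struct { int guest_idx; int host_hw_idx; } pmcs[] = {
		{ 0,  0 },	/* same physical counter: not cross-mapped */
		{ 1,  3 },	/* perf moved it: cross-mapped             */
		{ 2, -1 },	/* not scheduled on hardware: skipped      */
	};
	unsigned long long host_cross_mapped_mask = 0;

	for (int i = 0; i < 3; i++) {
		int hw = pmcs[i].host_hw_idx;

		if (hw != pmcs[i].guest_idx && hw > -1)
			host_cross_mapped_mask |= 1ull << hw;
	}
	printf("host_cross_mapped_mask = %#llx\n", host_cross_mapped_mask);
	return 0;
}
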
+
+struct kvm_pmu_ops intel_pmu_ops __initdata = {
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = intel_msr_idx_to_pmc,
- .is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
.is_valid_msr = intel_is_valid_msr,
.get_msr = intel_pmu_get_msr,
.set_msr = intel_pmu_set_msr,
@@ -729,4 +778,7 @@ struct kvm_pmu_ops intel_pmu_ops = {
.reset = intel_pmu_reset,
.deliver_pmi = intel_pmu_deliver_pmi,
.cleanup = intel_pmu_cleanup,
+ .EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
+ .MAX_NR_GP_COUNTERS = KVM_MAX_NR_INTEL_GP_COUNTERS,
+ .MIN_NR_GP_COUNTERS = 1,
};