Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 43
1 file changed, 31 insertions, 12 deletions
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 69b67ab3c4bf..d050143326b5 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -51,13 +51,19 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
*/
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
+ return (select_idx == ARMV8_PMU_CYCLE_IDX);
+}
+
+static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx)
+{
return (select_idx == ARMV8_PMU_CYCLE_IDX &&
__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}
static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
{
- return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX);
+ return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX &&
+ !kvm_pmu_idx_has_64bit_overflow(vcpu, idx));
}
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
@@ -97,7 +103,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
counter += perf_event_read_value(pmc->perf_event, &enabled,
&running);
- if (select_idx != ARMV8_PMU_CYCLE_IDX)
+ if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
counter = lower_32_bits(counter);
return counter;
@@ -423,6 +429,23 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
}
}
+/* Compute the sample period for a given counter value */
+static u64 compute_period(struct kvm_vcpu *vcpu, u64 select_idx, u64 counter)
+{
+ u64 val;
+
+ if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
+ if (!kvm_pmu_idx_has_64bit_overflow(vcpu, select_idx))
+ val = -(counter & GENMASK(31, 0));
+ else
+ val = (-counter) & GENMASK(63, 0);
+ } else {
+ val = (-counter) & GENMASK(31, 0);
+ }
+
+ return val;
+}
+
/**
* When the perf event overflows, set the overflow status and inform the vcpu.
*/
@@ -442,10 +465,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
* Reset the sample period to the architectural limit,
* i.e. the point where the counter overflows.
*/
- period = -(local64_read(&perf_event->count));
-
- if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
- period &= GENMASK(31, 0);
+ period = compute_period(vcpu, idx, local64_read(&perf_event->count));
local64_set(&perf_event->hw.period_left, 0);
perf_event->attr.sample_period = period;
@@ -571,14 +591,13 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
/*
* If counting with a 64bit counter, advertise it to the perf
- * code, carefully dealing with the initial sample period.
+ * code, carefully dealing with the initial sample period
+ * which also depends on the overflow.
*/
- if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
+ if (kvm_pmu_idx_is_64bit(vcpu, select_idx))
attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;
- attr.sample_period = (-counter) & GENMASK(63, 0);
- } else {
- attr.sample_period = (-counter) & GENMASK(31, 0);
- }
+
+ attr.sample_period = compute_period(vcpu, select_idx, counter);
event = perf_event_create_kernel_counter(&attr, -1, current,
kvm_pmu_perf_overflow, pmc);
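
For a quick sanity check of the arithmetic above, the new compute_period() can be
mirrored in a standalone userspace program. This is only a sketch: the GENMASK64
macro is a stand-in for the kernel's GENMASK(), and two plain booleans replace the
kvm_pmu_idx_is_64bit()/kvm_pmu_idx_has_64bit_overflow() checks on the vcpu and
PMCR_EL0 state; it reproduces the patch's period calculation, nothing more.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Userspace stand-in for the kernel's GENMASK(h, l) over 64-bit values. */
#define GENMASK64(h, l)	((~0ULL >> (63 - (h))) & (~0ULL << (l)))

/*
 * Same arithmetic as compute_period() in the hunk above; the booleans
 * stand in for kvm_pmu_idx_is_64bit() and kvm_pmu_idx_has_64bit_overflow().
 */
static uint64_t compute_period(bool is_64bit, bool overflow_64bit, uint64_t counter)
{
	uint64_t val;

	if (is_64bit) {
		if (!overflow_64bit)
			val = -(counter & GENMASK64(31, 0));
		else
			val = (-counter) & GENMASK64(63, 0);
	} else {
		val = (-counter) & GENMASK64(31, 0);
	}

	return val;
}

int main(void)
{
	uint64_t counter = 0x123456789abcdef0ULL;

	/* 32-bit event counter: distance to the 32-bit boundary. */
	printf("32bit counter:             %#" PRIx64 "\n",
	       compute_period(false, false, counter));

	/* Cycle counter with PMCR_EL0.LC set: full 64-bit distance. */
	printf("64bit counter/overflow:    %#" PRIx64 "\n",
	       compute_period(true, true, counter));

	/* Cycle counter with PMCR_EL0.LC clear: 64-bit counter, 32-bit overflow. */
	printf("64bit counter, 32bit ovfl: %#" PRIx64 "\n",
	       compute_period(true, false, counter));

	return 0;
}

With that sample value the three cases print 0x65432110, 0xedcba98765432110 and
0xffffffff65432110 respectively, which makes the split between "64bit counter" and
"64bit overflow" easy to see: only the LC-clear cycle counter negates just the low
32 bits of the counter value.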