author     Marc Zyngier <maz@kernel.org>  2021-06-18 14:25:59 +0100
committer  Marc Zyngier <maz@kernel.org>  2021-06-18 14:25:59 +0100
commit     cb5faa8c7df02a83dd18d8b5c4090a69e93523ec (patch)
tree       bb6f3f42b86e64378409781a7c4870f1f64f8559 /arch/arm64/kvm
parent     b88835a89df7083510478896caafbf7292cea760 (diff)
parent     d0c94c49792cf780cbfefe29f81bb8c3b73bc76b (diff)
Merge branch kvm-arm64/pmu-fixes into kvmarm-master/next
Fixes for the PMUv3 emulation of PMCR_EL0:

- Don't spuriously reset the cycle counter when resetting other counters
- Force PMCR_EL0 to become effective after having restored it

* kvm-arm64/pmu-fixes:
  KVM: arm64: Restore PMU configuration on first run
  KVM: arm64: Don't zero the cycle count register when PMCR_EL0.P is set
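For context on the first fix: the Arm architecture specifies that writing PMCR_EL0 with the P bit set resets only the event counters, while the cycle counter (index 31) is reset solely by the C bit. Below is a standalone sketch of that behaviour, not the kernel code; the names (handle_pmcr_write, PMCR_P, PMCR_C, CYCLE_IDX) are hypothetical simplifications of the ARMV8_PMU_* identifiers used in the diff.

/*
 * Standalone sketch (not the kernel code) of the PMCR_EL0 write semantics
 * enforced by the first fix: the P bit clears only the event counters,
 * the cycle counter (index 31) is cleared solely by the C bit.
 */
#include <stdint.h>
#include <stdio.h>

#define PMCR_P		(1U << 1)	/* reset event counters */
#define PMCR_C		(1U << 2)	/* reset cycle counter */
#define CYCLE_IDX	31

static void handle_pmcr_write(uint64_t counters[32], unsigned int nr_events,
			      uint32_t val)
{
	/* Valid-counter mask: the event counters plus the cycle counter bit. */
	uint32_t mask = (nr_events ? (1U << nr_events) - 1 : 0) | (1U << CYCLE_IDX);
	int i;

	if (val & PMCR_C)
		counters[CYCLE_IDX] = 0;

	if (val & PMCR_P) {
		/* The fix: never let the P bit touch the cycle counter. */
		mask &= ~(1U << CYCLE_IDX);
		for (i = 0; i < 32; i++)
			if (mask & (1U << i))
				counters[i] = 0;
	}
}

int main(void)
{
	uint64_t counters[32] = { [0] = 100, [CYCLE_IDX] = 12345 };

	handle_pmcr_write(counters, 6, PMCR_P);
	/* Event counter 0 is zeroed, the cycle counter is left alone. */
	printf("evt0=%llu cycle=%llu\n",
	       (unsigned long long)counters[0],
	       (unsigned long long)counters[CYCLE_IDX]);
	return 0;
}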
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--	arch/arm64/kvm/arm.c		4
-rw-r--r--	arch/arm64/kvm/pmu-emul.c	4
2 files changed, 8 insertions, 0 deletions
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e720148232a0..facf4d41d32a 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -689,6 +689,10 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 			vgic_v4_load(vcpu);
 			preempt_enable();
 		}
+
+		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
+			kvm_pmu_handle_pmcr(vcpu,
+					    __vcpu_sys_reg(vcpu, PMCR_EL0));
 	}
 }
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index fd167d4f4215..f33825c995cb 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -578,6 +578,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
 	if (val & ARMV8_PMU_PMCR_P) {
+		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_counter_value(vcpu, i, 0);
 	}
@@ -850,6 +851,9 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
+	/* One-off reload of the PMU on first run */
+	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
 	return 0;
 }
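The two hunks work as a pair: kvm_arm_pmu_v3_enable() raises KVM_REQ_RELOAD_PMU once when the PMU is finalized, and check_vcpu_requests() consumes it on the next guest entry, feeding the saved PMCR_EL0 back through kvm_pmu_handle_pmcr() so a configuration restored by userspace actually takes effect before the first run. A heavily simplified, hypothetical illustration of that request/consume pattern (all names below are made up):

/*
 * Toy model of the vCPU-request pattern tying the two hunks together:
 * the request is raised once, then consumed exactly once on the next
 * "guest entry", where the saved PMCR_EL0 value is re-applied.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REQ_RELOAD_PMU	0

struct toy_vcpu {
	uint64_t requests;
	uint64_t pmcr_el0;	/* saved/restored register value */
};

static void make_request(struct toy_vcpu *v, int req)
{
	v->requests |= 1ULL << req;
}

static bool check_request(struct toy_vcpu *v, int req)
{
	bool pending = v->requests & (1ULL << req);

	v->requests &= ~(1ULL << req);		/* consume the request */
	return pending;
}

/* Mirrors check_vcpu_requests(): runs before every guest entry. */
static void check_vcpu_requests_sketch(struct toy_vcpu *v)
{
	if (check_request(v, REQ_RELOAD_PMU))
		printf("re-applying PMCR_EL0 = 0x%llx\n",
		       (unsigned long long)v->pmcr_el0);
}

int main(void)
{
	struct toy_vcpu v = { .pmcr_el0 = 0x41013000 };	/* arbitrary value */

	make_request(&v, REQ_RELOAD_PMU);	/* as in kvm_arm_pmu_v3_enable() */
	check_vcpu_requests_sketch(&v);		/* request handled exactly once */
	check_vcpu_requests_sketch(&v);		/* nothing pending: no output */
	return 0;
}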