Diffstat (limited to 'arch/arm64/kvm/pmu.c')
-rw-r--r--  arch/arm64/kvm/pmu.c | 101
1 file changed, 78 insertions(+), 23 deletions(-)
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index e71d00bb5271..a243934c5568 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -5,7 +5,8 @@
*/
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
-#include <asm/kvm_hyp.h>
+
+static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);
/*
* Given the perf event attributes and system type, determine
@@ -25,21 +26,26 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
return (attr->exclude_host != attr->exclude_guest);
}
+struct kvm_pmu_events *kvm_get_pmu_events(void)
+{
+ return this_cpu_ptr(&kvm_pmu_events);
+}
+
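
For readers unfamiliar with the idiom introduced above: DEFINE_PER_CPU() allocates one instance of the structure for every CPU, and this_cpu_ptr() returns the instance belonging to the CPU the caller is currently running on. Which CPU that is can change under preemption, so callers needing a consistent view disable preemption around the access. A minimal sketch of the pattern, with illustrative names that are not part of this patch:

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    /* Illustrative only: one event bitmap per CPU. */
    static DEFINE_PER_CPU(u32, example_events);

    static void example_set_events(u32 mask)
    {
            /* Keep the task on this CPU while touching its per-CPU data. */
            preempt_disable();
            *this_cpu_ptr(&example_events) |= mask;
            preempt_enable();
    }
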
/*
* Add events to track that we may want to switch at guest entry/exit
* time.
*/
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
- struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+ struct kvm_pmu_events *pmu = kvm_get_pmu_events();
- if (!kvm_pmu_switch_needed(attr))
+ if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
return;
if (!attr->exclude_host)
- ctx->pmu_events.events_host |= set;
+ pmu->events_host |= set;
if (!attr->exclude_guest)
- ctx->pmu_events.events_guest |= set;
+ pmu->events_guest |= set;
}
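
As context for how these hooks are driven: the host PMU driver records each counter's mask here when it enables an event whose attributes exclude the host or the guest, so the hypervisor can switch those counters at guest entry/exit. A simplified sketch of the caller side, loosely modelled on the arm64 PMUv3 driver (names and details vary by kernel version):

    static void example_enable_event_counter(struct perf_event *event, u32 mask)
    {
            struct perf_event_attr *attr = &event->attr;

            /* Tell KVM which counters to switch at guest entry/exit. */
            kvm_set_pmu_events(mask, attr);

            /* Counters excluded at host EL are enabled by the hypervisor
             * switch code on guest entry, not here. */
            if (!attr->exclude_host)
                    example_enable_counter(mask);
    }

Here example_enable_counter() stands in for the driver's low-level PMCNTENSET_EL0 write.
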
/*
@@ -47,10 +53,13 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
*/
void kvm_clr_pmu_events(u32 clr)
{
- struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+ struct kvm_pmu_events *pmu = kvm_get_pmu_events();
- ctx->pmu_events.events_host &= ~clr;
- ctx->pmu_events.events_guest &= ~clr;
+ if (!kvm_arm_support_pmu_v3())
+ return;
+
+ pmu->events_host &= ~clr;
+ pmu->events_guest &= ~clr;
}
#define PMEVTYPER_READ_CASE(idx) \
@@ -159,24 +168,27 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
}
/*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
- struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
u32 events_guest, events_host;
- if (!has_vhe())
+ if (!kvm_arm_support_pmu_v3() || !has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
- events_guest = host->pmu_events.events_guest;
- events_host = host->pmu_events.events_host;
+ preempt_disable();
+ pmu = kvm_get_pmu_events();
+ events_guest = pmu->events_guest;
+ events_host = pmu->events_host;
kvm_vcpu_pmu_enable_el0(events_guest);
kvm_vcpu_pmu_disable_el0(events_host);
+ preempt_enable();
}
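
The preempt_disable()/preempt_enable() pair added above closes a concrete hole: the sysreg-handling caller is preemptible, so without it the bitmaps could be read from one CPU's kvm_pmu_events while the EL0 enable/disable writes land on another CPU after a migration. An illustrative sketch of the unprotected sequence (not code from this patch):

    pmu = kvm_get_pmu_events();     /* reads, say, CPU 0's bookkeeping */
    /* A preemption-driven migration here moves the task to CPU 1. */
    kvm_vcpu_pmu_enable_el0(pmu->events_guest);   /* programs CPU 1 */
    kvm_vcpu_pmu_disable_el0(pmu->events_host);   /* from CPU 0's state */
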
/*
@@ -184,18 +196,61 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
- struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
u32 events_guest, events_host;
- if (!has_vhe())
+ if (!kvm_arm_support_pmu_v3() || !has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
- events_guest = host->pmu_events.events_guest;
- events_host = host->pmu_events.events_host;
+ pmu = kvm_get_pmu_events();
+ events_guest = pmu->events_guest;
+ events_host = pmu->events_host;
kvm_vcpu_pmu_enable_el0(events_host);
kvm_vcpu_pmu_disable_el0(events_guest);
}
+
+/*
+ * With VHE, keep track of the PMUSERENR_EL0 value for the host EL0 on the pCPU
+ * where PMUSERENR_EL0 for the guest is loaded, since PMUSERENR_EL0 is switched
+ * to the value for the guest on vcpu_load(). The value for the host EL0
+ * will be restored on vcpu_put(), before returning to userspace.
+ * This isn't necessary for nVHE, as the register is context switched for
+ * every guest enter/exit.
+ *
+ * Return true if KVM takes care of the register. Otherwise return false.
+ */
+bool kvm_set_pmuserenr(u64 val)
+{
+ struct kvm_cpu_context *hctxt;
+ struct kvm_vcpu *vcpu;
+
+ if (!kvm_arm_support_pmu_v3() || !has_vhe())
+ return false;
+
+ vcpu = kvm_get_running_vcpu();
+ if (!vcpu || !vcpu_get_flag(vcpu, PMUSERENR_ON_CPU))
+ return false;
+
+ hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
+ return true;
+}
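
On the host side, the expected calling convention is to offer the new value to KVM first and only write the register directly when no guest value is loaded. A sketch of such a caller (the helper name is illustrative; the real one lives in the host PMU driver):

    static void example_update_pmuserenr(u64 val)
    {
            /*
             * If the guest's PMUSERENR_EL0 is currently loaded, KVM records
             * the host EL0 value and restores it on vcpu_put(); otherwise
             * update the hardware register now.
             */
            if (kvm_set_pmuserenr(val))
                    return;

            write_sysreg(val, pmuserenr_el0);
    }
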
+
+/*
+ * If we interrupted the guest to update the host PMU context, make
+ * sure we re-apply the guest EL0 state.
+ */
+void kvm_vcpu_pmu_resync_el0(void)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!has_vhe() || !in_interrupt())
+ return;
+
+ vcpu = kvm_get_running_vcpu();
+ if (!vcpu)
+ return;
+
+ kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
+}