author		Peter Zijlstra <peterz@infradead.org>	2022-05-10 21:28:06 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2022-09-07 21:54:02 +0200
commit		e577bb17a1eaa35b86ee873a786e603be768d668 (patch)
tree		955c586b2da81106e82c297359d0360384e36633 /arch/x86/events/intel/core.c
parent		73759c346341d39dfde39701476c0376dea0a98b (diff)
perf/x86/intel: Move the topdown stuff into the intel driver
Use the new x86_pmu::{set_period,update}() methods to push the topdown
stuff into the Intel driver, where it belongs.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220829101321.505933457@infradead.org
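As a quick illustration of the mechanism the patch relies on, the shape of the
dispatch can be modelled in plain C: the core provides generic
set_period()/update() helpers, the driver installs its own methods in the PMU
ops table (reached via static_call() in the kernel proper), and those methods
special-case topdown events before falling back to the generic paths. The
sketch below is a simplified user-space model under assumed names
(struct pmu_ops, generic_set_period(), intel_set_period(), ...), not the
kernel's actual API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct perf_event {
	bool topdown;		/* stand-in for is_topdown_count(event) */
	int64_t prev_count;
};

struct pmu_ops {
	int (*set_period)(struct perf_event *event);
	uint64_t (*update)(struct perf_event *event);
};

/* Generic helpers, standing in for x86_perf_event_set_period()/_update(). */
static int generic_set_period(struct perf_event *event)
{
	event->prev_count = 0;
	return 0;
}

static uint64_t generic_update(struct perf_event *event)
{
	return (uint64_t)event->prev_count;
}

/* Driver overrides: route "topdown" events to a dedicated path, else fall back. */
static int intel_set_period(struct perf_event *event)
{
	if (event->topdown)
		return 0;	/* pretend a topdown-specific reset happened */
	return generic_set_period(event);
}

static uint64_t intel_update(struct perf_event *event)
{
	if (event->topdown)
		return 42;	/* pretend a topdown-specific readout happened */
	return generic_update(event);
}

static const struct pmu_ops intel_ops = {
	.set_period	= intel_set_period,
	.update		= intel_update,
};

int main(void)
{
	struct perf_event ev = { .topdown = false, .prev_count = 7 };

	/* Callers go through the method table, never the helpers directly. */
	intel_ops.set_period(&ev);
	printf("count = %llu\n", (unsigned long long)intel_ops.update(&ev));
	return 0;
}

In the diff below, the same shape appears as intel_pmu_set_period() and
intel_pmu_update(), installed in the intel_pmu method table and reached from
common code through static_call(x86_pmu_set_period) and
static_call(x86_pmu_update).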
Diffstat (limited to 'arch/x86/events/intel/core.c')
-rw-r--r--	arch/x86/events/intel/core.c	26
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ba101c28dcc9..feed732fdf57 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2302,7 +2302,7 @@ static void intel_pmu_nhm_workaround(void)
 	for (i = 0; i < 4; i++) {
 		event = cpuc->events[i];
 		if (event)
-			x86_perf_event_update(event);
+			static_call(x86_pmu_update)(event);
 	}
 
 	for (i = 0; i < 4; i++) {
@@ -2317,7 +2317,7 @@ static void intel_pmu_nhm_workaround(void)
 		event = cpuc->events[i];
 
 		if (event) {
-			x86_perf_event_set_period(event);
+			static_call(x86_pmu_set_period)(event);
 			__x86_pmu_enable_event(&event->hw,
 					ARCH_PERFMON_EVENTSEL_ENABLE);
 		} else
@@ -2794,7 +2794,7 @@ static void intel_pmu_add_event(struct perf_event *event)
  */
 int intel_pmu_save_and_restart(struct perf_event *event)
 {
-	x86_perf_event_update(event);
+	static_call(x86_pmu_update)(event);
 	/*
 	 * For a checkpointed counter always reset back to 0. This
 	 * avoids a situation where the counter overflows, aborts the
@@ -2806,9 +2806,27 @@ int intel_pmu_save_and_restart(struct perf_event *event)
 		wrmsrl(event->hw.event_base, 0);
 		local64_set(&event->hw.prev_count, 0);
 	}
+	return static_call(x86_pmu_set_period)(event);
+}
+
+static int intel_pmu_set_period(struct perf_event *event)
+{
+	if (unlikely(is_topdown_count(event)) &&
+	    x86_pmu.set_topdown_event_period)
+		return x86_pmu.set_topdown_event_period(event);
+
 	return x86_perf_event_set_period(event);
 }
 
+static u64 intel_pmu_update(struct perf_event *event)
+{
+	if (unlikely(is_topdown_count(event)) &&
+	    x86_pmu.update_topdown_event)
+		return x86_pmu.update_topdown_event(event);
+
+	return x86_perf_event_update(event);
+}
+
 static void intel_pmu_reset(void)
 {
 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
@@ -4786,6 +4804,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.add			= intel_pmu_add_event,
 	.del			= intel_pmu_del_event,
 	.read			= intel_pmu_read_event,
+	.set_period		= intel_pmu_set_period,
+	.update			= intel_pmu_update,
 	.hw_config		= intel_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,