author		Peter Zijlstra <peterz@infradead.org>	2022-05-10 21:28:25 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2022-09-07 21:54:02 +0200
commit		28f0f3c44b5c35be657a4f922dcdfb48285f4373 (patch)
tree		0be9accbe824b6ff579eb094cff959a09c90ebd8 /arch/x86/events/intel/core.c
parent		e577bb17a1eaa35b86ee873a786e603be768d668 (diff)
perf/x86: Change x86_pmu::limit_period signature
In preparation for making it a static_call, change the signature.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220829101321.573713839@infradead.org
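The changelog is terse, so the one implicit step is worth spelling out: static_call_cond(), which lets an unset hook be patched into a plain NOP, only works for functions returning void, which is why the clamped period moves from the return value into an in/out parameter. A minimal sketch of the wiring this prepares for, modeled on the existing x86_pmu static calls in arch/x86/events/core.c (the x86_pmu_limit_period name is taken from the follow-up conversion, not from this patch):

/* Sketch only, not the verbatim follow-up patch. */
#include <linux/static_call.h>

DEFINE_STATIC_CALL_NULL(x86_pmu_limit_period, *x86_pmu.limit_period);

static void x86_pmu_wire_up_limit_period(void)
{
	/* Bind the model-specific hook; it may legitimately be NULL. */
	static_call_update(x86_pmu_limit_period, x86_pmu.limit_period);
}

static void example_call_site(struct perf_event *event, s64 left)
{
	/* Patched to a NOP when no hook is registered; requires void return. */
	static_call_cond(x86_pmu_limit_period)(event, &left);
}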
Diffstat (limited to 'arch/x86/events/intel/core.c')
-rw-r--r--	arch/x86/events/intel/core.c	19
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index feed732fdf57..92cc390590d1 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4344,28 +4344,25 @@ static u8 adl_get_hybrid_cpu_type(void)
  * Therefore the effective (average) period matches the requested period,
  * despite coarser hardware granularity.
  */
-static u64 bdw_limit_period(struct perf_event *event, u64 left)
+static void bdw_limit_period(struct perf_event *event, s64 *left)
 {
 	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
 			X86_CONFIG(.event=0xc0, .umask=0x01)) {
-		if (left < 128)
-			left = 128;
-		left &= ~0x3fULL;
+		if (*left < 128)
+			*left = 128;
+		*left &= ~0x3fULL;
 	}
-	return left;
 }
 
-static u64 nhm_limit_period(struct perf_event *event, u64 left)
+static void nhm_limit_period(struct perf_event *event, s64 *left)
 {
-	return max(left, 32ULL);
+	*left = max(*left, 32LL);
 }
 
-static u64 spr_limit_period(struct perf_event *event, u64 left)
+static void spr_limit_period(struct perf_event *event, s64 *left)
 {
 	if (event->attr.precise_ip == 3)
-		return max(left, 128ULL);
-
-	return left;
+		*left = max(*left, 128LL);
 }
 
 PMU_FORMAT_ATTR(event, "config:0-7" );
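For completeness: the commit also updates the caller, x86_perf_event_set_period() in arch/x86/events/core.c, which is filtered out of this view by the path limit. The caller-side change looks roughly like this (a sketch of the convention swap, not the verbatim hunk):

/* Before: the hook returned the possibly-clamped period. */
if (x86_pmu.limit_period)
	left = x86_pmu.limit_period(event, left);

/* After: the hook clamps the period in place through the pointer. */
if (x86_pmu.limit_period)
	x86_pmu.limit_period(event, &left);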