author     Li RongQing <lirongqing@baidu.com>    2025-03-06 13:11:02 +0800
committer  Ingo Molnar <mingo@kernel.org>        2025-03-06 22:42:26 +0100
commit     7a310c644cf571fbdb1d447a1dc39cf048634589
tree       cbb30b31daebb68a368dd3b3a5d2d094f19de2e0
parent     fa6192adc32f4fdfe5b74edd5b210e12afd6ecc0
perf/x86/intel/bts: Check if bts_ctx is allocated when calling BTS functions
bts_ctx might not be allocated, for example if the CPU has X86_FEATURE_PTI,
but intel_bts_disable/enable_local() and intel_bts_interrupt() are called
unconditionally from intel_pmu_handle_irq() and crash on bts_ctx.

So check if bts_ctx is allocated when calling BTS functions.

Fixes: 3acfcefa795c ("perf/x86/intel/bts: Allocate bts_ctx only if necessary")
Reported-by: Jiri Olsa <olsajiri@gmail.com>
Tested-by: Jiri Olsa <jolsa@kernel.org>
Suggested-by: Adrian Hunter <adrian.hunter@intel.com>
Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250306051102.2642-1-lirongqing@baidu.com
-rw-r--r--  arch/x86/events/intel/bts.c | 25
1 file changed, 20 insertions(+), 5 deletions(-)
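For context on why bts_ctx can be NULL at interrupt time: the Fixes: commit made the per-CPU BTS context lazily allocated, so it is only set up when BTS is actually usable. A rough sketch of that allocation side, hedged — the init-function body below is illustrative and simplified, not the literal bts.c code:

/*
 * Illustrative sketch only -- not the literal kernel code. bts_ctx is a
 * lazily allocated per-CPU pointer: it stays NULL whenever the allocation
 * is skipped, for example on CPUs with X86_FEATURE_PTI.
 */
static struct bts_ctx __percpu *bts_ctx;

static __init int bts_init(void)
{
	if (boot_cpu_has(X86_FEATURE_PTI))
		return -ENODEV;		/* BTS unusable: bts_ctx stays NULL */

	bts_ctx = alloc_percpu(struct bts_ctx);
	if (!bts_ctx)
		return -ENOMEM;

	/* ... PMU/event-source registration elided ... */
	return 0;
}

Because the PMI path does not know whether that allocation ever happened, the patch below adds the NULL check inside each BTS helper.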
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 953868da82fb..39a987d5eb6e 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -338,9 +338,14 @@ static void bts_event_stop(struct perf_event *event, int flags)
 
 void intel_bts_enable_local(void)
 {
-	struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
-	int state = READ_ONCE(bts->state);
+	struct bts_ctx *bts;
+	int state;
+	if (!bts_ctx)
+		return;
+
+	bts = this_cpu_ptr(bts_ctx);
+	state = READ_ONCE(bts->state);
 
 	/*
 	 * Here we transition from INACTIVE to ACTIVE;
 	 * if we instead are STOPPED from the interrupt handler,
@@ -358,7 +363,12 @@ void intel_bts_enable_local(void)
 
 void intel_bts_disable_local(void)
 {
-	struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
+	struct bts_ctx *bts;
+
+	if (!bts_ctx)
+		return;
+
+	bts = this_cpu_ptr(bts_ctx);
 
 	/*
 	 * Here we transition from ACTIVE to INACTIVE;
@@ -450,12 +460,17 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
 int intel_bts_interrupt(void)
 {
 	struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
-	struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
-	struct perf_event *event = bts->handle.event;
+	struct bts_ctx *bts;
+	struct perf_event *event;
 	struct bts_buffer *buf;
 	s64 old_head;
 	int err = -ENOSPC, handled = 0;
 
+	if (!bts_ctx)
+		return 0;
+
+	bts = this_cpu_ptr(bts_ctx);
+	event = bts->handle.event;
 	/*
 	 * The only surefire way of knowing if this NMI is ours is by checking
 	 * the write ptr against the PMI threshold.
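
The commit message notes that these helpers are invoked unconditionally from intel_pmu_handle_irq(), which is why the NULL check has to live inside each helper rather than at every call site. A condensed, schematic view of such a caller — the function name and body here are illustrative, not the actual kernel PMI handler:

/*
 * Schematic PMI-path caller, condensed from the commit message; not the
 * real intel_pmu_handle_irq(). The BTS helpers run on every PMI, whether
 * or not BTS was ever set up, so each one must bail out on a NULL bts_ctx.
 */
static int pmi_handler_sketch(void)
{
	int handled = 0;

	intel_bts_disable_local();		/* no-op when bts_ctx == NULL */
	handled += intel_bts_interrupt();	/* returns 0 when bts_ctx == NULL */

	/* ... handle the other PMU interrupt sources ... */

	intel_bts_enable_local();
	return handled;
}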