author	Linus Torvalds <torvalds@linux-foundation.org>	2025-02-28 16:52:10 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-02-28 16:52:10 -0800
commit	766331f2860b08695418109582c94e98cc3528fe (patch)
tree	4ef19126b4ac17dd304a94b3f11f09808433d871 /kernel
parent	ad69e021288d04f297c097985513306cbd304be3 (diff)
parent	f8c857238a392f21d5726d07966f6061007c8d4f (diff)
Merge tag 'perf-urgent-2025-02-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf event fixes from Ingo Molnar:
 "Miscellaneous perf events fixes and a minor HW enablement change:

   - Fix missing RCU protection in perf_iterate_ctx()

   - Fix pmu_ctx_list ordering bug

   - Reject the zero page in uprobes

   - Fix a family of bugs related to low frequency sampling

   - Add Intel Arrow Lake U CPUs to the generic Arrow Lake RAPL support table

   - Fix a lockdep-assert false positive in uretprobes"

* tag 'perf-urgent-2025-02-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  uprobes: Remove too strict lockdep_assert() condition in hprobe_expire()
  perf/x86/rapl: Add support for Intel Arrow Lake U
  perf/x86/intel: Use better start period for frequency mode
  perf/core: Fix low freq setting via IOC_PERIOD
  perf/x86: Fix low freqency setting issue
  uprobes: Reject the shared zeropage in uprobe_write_opcode()
  perf/core: Order the PMU list to fix warning about unordered pmu_ctx_list
  perf/core: Add RCU read lock protection to perf_iterate_ctx()
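For context (not part of the patch): the IOC_PERIOD and low-frequency fixes sit on the path a profiler exercises when it opens a frequency-mode event and later adjusts the rate at runtime. A minimal user-space sketch of that usage, with an illustrative event choice and rate values and error handling trimmed:

/* Open a frequency-mode cycles event, then change the sampling frequency
 * with PERF_EVENT_IOC_PERIOD -- the ioctl that _perf_event_period() below
 * validates. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t new_freq = 50;			/* new sample frequency, in Hz */
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.freq = 1;				/* interpret sample_freq as Hz */
	attr.sample_freq = 1;			/* start at a low frequency */
	attr.disabled = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0 /* self */, -1 /* any CPU */,
		     -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* With attr.freq set, the ioctl argument is a frequency, not a period;
	 * values above kernel.perf_event_max_sample_rate are rejected. */
	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_freq))
		perror("PERF_EVENT_IOC_PERIOD");

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload, consume samples from the mmap'd ring buffer ... */
	close(fd);
	return 0;
}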
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c	31
-rw-r--r--	kernel/events/uprobes.c	15
2 files changed, 32 insertions(+), 14 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index bcb09e011e9e..6364319e2f88 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4950,7 +4950,7 @@ static struct perf_event_pmu_context *
find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
struct perf_event *event)
{
- struct perf_event_pmu_context *new = NULL, *epc;
+ struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc;
void *task_ctx_data = NULL;
if (!ctx->task) {
@@ -5007,12 +5007,19 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
atomic_inc(&epc->refcount);
goto found_epc;
}
+ /* Make sure the pmu_ctx_list is sorted by PMU type: */
+ if (!pos && epc->pmu->type > pmu->type)
+ pos = epc;
}
epc = new;
new = NULL;
- list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
+ if (!pos)
+ list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
+ else
+ list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev);
+
epc->ctx = ctx;
found_epc:
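The insertion above leans on the kernel list API: list_add() links the new node right after the node it is given, so adding after pos->pmu_ctx_entry.prev places the new context immediately before pos, while list_add_tail() appends when no entry with a larger PMU type was found. A standalone sketch of the same pattern, using a hypothetical struct item keyed by ->key (not from the patch):

#include <linux/list.h>

struct item {
	int key;
	struct list_head entry;
};

/* Insert 'new' so the list stays sorted by ->key, mirroring the
 * pmu_ctx_list handling above. */
static void insert_sorted(struct list_head *head, struct item *new)
{
	struct item *pos;

	list_for_each_entry(pos, head, entry) {
		if (pos->key > new->key) {
			/* list_add() adds after the given node, so adding
			 * after pos->entry.prev lands 'new' before 'pos'. */
			list_add(&new->entry, pos->entry.prev);
			return;
		}
	}
	/* No larger key found: append at the tail. */
	list_add_tail(&new->entry, head);
}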
@@ -5962,14 +5969,15 @@ static int _perf_event_period(struct perf_event *event, u64 value)
if (!value)
return -EINVAL;
- if (event->attr.freq && value > sysctl_perf_event_sample_rate)
- return -EINVAL;
-
- if (perf_event_check_period(event, value))
- return -EINVAL;
-
- if (!event->attr.freq && (value & (1ULL << 63)))
- return -EINVAL;
+ if (event->attr.freq) {
+ if (value > sysctl_perf_event_sample_rate)
+ return -EINVAL;
+ } else {
+ if (perf_event_check_period(event, value))
+ return -EINVAL;
+ if (value & (1ULL << 63))
+ return -EINVAL;
+ }
event_function_call(event, __perf_event_period, &value);
@@ -8321,7 +8329,8 @@ void perf_event_exec(void)
perf_event_enable_on_exec(ctx);
perf_event_remove_on_exec(ctx);
- perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
+ scoped_guard(rcu)
+ perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
perf_unpin_context(ctx);
put_ctx(ctx);
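The scoped_guard(rcu) form comes from the kernel's cleanup helpers in <linux/cleanup.h>: it takes rcu_read_lock() for the guarded statement and drops it automatically when the scope ends. A rough sketch of the equivalence, reusing the call from the hunk above purely for illustration (not part of the patch):

#include <linux/cleanup.h>
#include <linux/rcupdate.h>

/* Guarded form: the RCU read lock is held only for the statement below
 * and released automatically when the scope is left. */
static void iterate_guarded(struct perf_event_context *ctx)
{
	scoped_guard(rcu)
		perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
}

/* Open-coded equivalent with explicit lock/unlock. */
static void iterate_explicit(struct perf_event_context *ctx)
{
	rcu_read_lock();
	perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
	rcu_read_unlock();
}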
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index bf2a87a0a378..b4ca8898fe17 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -495,6 +495,11 @@ retry:
if (ret <= 0)
goto put_old;
+ if (is_zero_page(old_page)) {
+ ret = -EINVAL;
+ goto put_old;
+ }
+
if (WARN(!is_register && PageCompound(old_page),
"uprobe unregister should never work on compound page\n")) {
ret = -EINVAL;
@@ -762,10 +767,14 @@ static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get)
enum hprobe_state hstate;
/*
- * return_instance's hprobe is protected by RCU.
- * Underlying uprobe is itself protected from reuse by SRCU.
+ * Caller should guarantee that return_instance is not going to be
+ * freed from under us. This can be achieved either through holding
+ * rcu_read_lock() or by owning return_instance in the first place.
+ *
+ * Underlying uprobe is itself protected from reuse by SRCU, so ensure
+ * SRCU lock is held properly.
*/
- lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu));
+ lockdep_assert(srcu_read_lock_held(&uretprobes_srcu));
hstate = READ_ONCE(hprobe->state);
switch (hstate) {
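The relaxed assertion above only requires the SRCU read side, since the caller may legitimately own the return_instance without holding rcu_read_lock(). A small kernel-context sketch of that assertion pattern, with a hypothetical srcu_struct and helpers (not from the patch):

#include <linux/lockdep.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(example_srcu);

/* Callees can assert the SRCU read side alone, without also demanding
 * rcu_read_lock(), mirroring hprobe_expire() after this change. */
static void needs_srcu_reader(void)
{
	lockdep_assert(srcu_read_lock_held(&example_srcu));
	/* ... dereference SRCU-protected data ... */
}

static void caller(void)
{
	int idx = srcu_read_lock(&example_srcu);

	needs_srcu_reader();
	srcu_read_unlock(&example_srcu, idx);
}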