diff options
| author | Andrea Righi <arighi@nvidia.com> | 2025-04-22 10:26:33 +0200 | 
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2025-04-22 09:28:12 -1000 | 
| commit | a11d6784d7316a6c77ca9f14fb1a698ebbb3c1fb (patch) | |
| tree | 109ae3d3e3963e99d31b791aa0d64e789514c42b | |
| parent | 18853ba782bef65fc81ef2b3370382e5b479c5eb (diff) | |
sched_ext: Fix missing rq lock in scx_bpf_cpuperf_set()
scx_bpf_cpuperf_set() can be used to set a performance target level on
any CPU. However, it doesn't correctly acquire the corresponding rq
lock, which may lead to unsafe behavior and trigger the following
warning, due to the lockdep_assert_rq_held() check:
[   51.713737] WARNING: CPU: 3 PID: 3899 at kernel/sched/sched.h:1512 scx_bpf_cpuperf_set+0x1a0/0x1e0
...
[   51.713836] Call trace:
[   51.713837]  scx_bpf_cpuperf_set+0x1a0/0x1e0 (P)
[   51.713839]  bpf_prog_62d35beb9301601f_bpfland_init+0x168/0x440
[   51.713841]  bpf__sched_ext_ops_init+0x54/0x8c
[   51.713843]  scx_ops_enable.constprop.0+0x2c0/0x10f0
[   51.713845]  bpf_scx_reg+0x18/0x30
[   51.713847]  bpf_struct_ops_link_create+0x154/0x1b0
[   51.713849]  __sys_bpf+0x1934/0x22a0
Fix by properly acquiring the rq lock when possible or raising an error
if we try to operate on a CPU that is not the one currently locked.
Fixes: d86adb4fc0655 ("sched_ext: Add cpuperf support")
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Acked-by: Changwoo Min <changwoo@igalia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
| -rw-r--r-- | kernel/sched/ext.c | 27 | 
1 file changed, 23 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 585bf6d8238b..ac79067dc87e 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -7113,13 +7113,32 @@ __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
 	}
 
 	if (ops_cpu_valid(cpu, NULL)) {
-		struct rq *rq = cpu_rq(cpu);
+		struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
+		struct rq_flags rf;
+
+		/*
+		 * When called with an rq lock held, restrict the operation
+		 * to the corresponding CPU to prevent ABBA deadlocks.
+		 */
+		if (locked_rq && rq != locked_rq) {
+			scx_ops_error("Invalid target CPU %d", cpu);
+			return;
+		}
+
+		/*
+		 * If no rq lock is held, allow to operate on any CPU by
+		 * acquiring the corresponding rq lock.
+		 */
+		if (!locked_rq) {
+			rq_lock_irqsave(rq, &rf);
+			update_rq_clock(rq);
+		}
 
 		rq->scx.cpuperf_target = perf;
+		cpufreq_update_util(rq, 0);
 
-		rcu_read_lock_sched_notrace();
-		cpufreq_update_util(cpu_rq(cpu), 0);
-		rcu_read_unlock_sched_notrace();
+		if (!locked_rq)
+			rq_unlock_irqrestore(rq, &rf);
 	}
 }
