| author | Andrea Righi <arighi@nvidia.com> | 2025-06-05 11:30:26 +0200 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2025-06-09 06:25:35 -1000 |
| commit | 086ed90a6453873d4c5d51a18c26b3548af4fa24 (patch) | |
| tree | 9a40aed67a473df0b5f0021447a5bc43f0dd6b3f /kernel/sched/ext.c | |
| parent | e212743bd727c3fcffcd73b6c1d906546ee83805 (diff) | |
sched_ext: Make scx_locked_rq() inline
scx_locked_rq() is used from both ext.c and ext_idle.c, so move it to ext.h
as a static inline function.
No functional changes.
v2: Rename locked_rq to scx_locked_rq_state, expose it and make
scx_locked_rq() inline, as suggested by Tejun.
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
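The diff below is limited to kernel/sched/ext.c, so the header side of the move is not shown here. Based on the function body removed from ext.c and the v2 rename, a minimal sketch of what kernel/sched/ext.h likely gains (the exact placement within the header is an assumption):

```c
/* kernel/sched/ext.h (sketch, not part of the diff shown below) */

/* Per-CPU state, renamed and exposed so the inline accessor can read it. */
DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);

/*
 * Return the rq currently locked from an scx callback, or NULL if no rq is
 * locked.
 */
static inline struct rq *scx_locked_rq(void)
{
	return __this_cpu_read(scx_locked_rq_state);
}
```

Making the accessor a header inline lets ext_idle.c call it without a cross-file function call, at the cost of exposing the per-CPU variable itself.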
Diffstat (limited to 'kernel/sched/ext.c')
| -rw-r--r-- | kernel/sched/ext.c | 13 |
|---|---|---|

1 file changed, 2 insertions, 11 deletions
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 3e483138dff6..3623ba98d7d8 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1247,7 +1247,7 @@ static void scx_kf_disallow(u32 mask)
  * This allows kfuncs to safely operate on rq from any scx ops callback,
  * knowing which rq is already locked.
  */
-static DEFINE_PER_CPU(struct rq *, locked_rq);
+DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);
 
 static inline void update_locked_rq(struct rq *rq)
 {
@@ -1258,16 +1258,7 @@ static inline void update_locked_rq(struct rq *rq)
 	 */
 	if (rq)
 		lockdep_assert_rq_held(rq);
-	__this_cpu_write(locked_rq, rq);
-}
-
-/*
- * Return the rq currently locked from an scx callback, or NULL if no rq is
- * locked.
- */
-static inline struct rq *scx_locked_rq(void)
-{
-	return __this_cpu_read(locked_rq);
+	__this_cpu_write(scx_locked_rq_state, rq);
 }
 
 #define SCX_CALL_OP(sch, mask, op, rq, args...)					\
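With this change, update_locked_rq() still records which rq is held around each op invocation, and any code in ext.c or ext_idle.c can retrieve it through the now-shared scx_locked_rq(). As a usage illustration only, a hypothetical helper might consume it like this; example_task_on_locked_rq() and its logic are assumptions for illustration, not part of this commit:

```c
/*
 * Hypothetical caller (not from this commit): check whether a task's rq
 * is the one already locked by the current scx callback.
 */
static bool example_task_on_locked_rq(struct task_struct *p)
{
	struct rq *rq = scx_locked_rq();	/* NULL if no rq is locked */

	return rq && task_rq(p) == rq;
}
```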