| field | value | date |
|---|---|---|
| author | Paul E. McKenney <paulmck@kernel.org> | 2025-11-02 14:49:44 -0800 |
| committer | Frederic Weisbecker <frederic@kernel.org> | 2025-11-07 14:37:17 +0100 |
| commit | 057df3eaca289365e28f413d1f30c63819719076 (patch) | |
| tree | c6f05f4a1454100216b8fc7c8df75622932f3671 | |
| parent | 78a731cefce65a9aa56ec5ee57347672b9aa9119 (diff) | |
refscale: Add preempt_disable() readers
This commit adds refscale readers based on preempt_disable() and
preempt_enable() ("refscale.scale_type=preempt"). On my x86 laptop,
each of these readers takes about 2.8ns per operation.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/rcu/refscale.c | 33 |

1 file changed, 32 insertions, 1 deletion
```diff
diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index 995c189efaf0..8f9cd6eff2b5 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -629,6 +629,37 @@ static const struct ref_scale_ops jiffies_ops = {
 	.name		= "jiffies"
 };
 
+static void ref_preempt_section(const int nloops)
+{
+	int i;
+
+	migrate_disable();
+	for (i = nloops; i >= 0; i--) {
+		preempt_disable();
+		preempt_enable();
+	}
+	migrate_enable();
+}
+
+static void ref_preempt_delay_section(const int nloops, const int udl, const int ndl)
+{
+	int i;
+
+	migrate_disable();
+	for (i = nloops; i >= 0; i--) {
+		preempt_disable();
+		un_delay(udl, ndl);
+		preempt_enable();
+	}
+	migrate_enable();
+}
+
+static const struct ref_scale_ops preempt_ops = {
+	.readsection	= ref_preempt_section,
+	.delaysection	= ref_preempt_delay_section,
+	.name		= "preempt"
+};
+
 static void ref_bh_section(const int nloops)
 {
 	int i;
@@ -1261,7 +1292,7 @@ ref_scale_init(void)
 		&rcu_ops, &srcu_ops, &srcu_fast_ops, RCU_TRACE_OPS RCU_TASKS_OPS
 		&refcnt_ops, &rwlock_ops, &rwsem_ops, &lock_ops, &lock_irq_ops,
 		&acqrel_ops, &sched_clock_ops, &clock_ops, &jiffies_ops,
-		&bh_ops, &irq_ops, &irqsave_ops,
+		&preempt_ops, &bh_ops, &irq_ops, &irqsave_ops,
 		&typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops,
 	};
```
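To make the "about 2.8ns" figure concrete, the sketch below is a rough user-space analog of the kind of per-operation timing loop refscale runs in the kernel: time a large number of empty paired operations and divide by the iteration count. It is not the refscale harness itself, and since preempt_disable()/preempt_enable() are kernel-only, a pair of compiler barriers stands in for the measured read-side enter/exit, so the printed cost is illustrative rather than comparable to the in-kernel result.

```c
/*
 * Rough user-space analog of refscale's per-operation timing loop
 * (a sketch, not the in-kernel harness). The kernel-only
 * preempt_disable()/preempt_enable() pair is replaced by compiler
 * barriers, so the reported cost is illustrative only.
 *
 * Build: cc -O2 -o refloop refloop.c
 */
#include <stdio.h>
#include <time.h>

#define NLOOPS 100000000UL

int main(void)
{
	struct timespec start, end;
	unsigned long i;
	double total_ns;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < NLOOPS; i++) {
		/* Stand-in for the paired read-side enter/exit operations. */
		asm volatile("" ::: "memory");
		asm volatile("" ::: "memory");
	}
	clock_gettime(CLOCK_MONOTONIC, &end);

	total_ns = (end.tv_sec - start.tv_sec) * 1e9 +
		   (end.tv_nsec - start.tv_nsec);
	printf("%.2f ns per iteration\n", total_ns / NLOOPS);
	return 0;
}
```

In the actual module, the new readers are selected with the refscale.scale_type=preempt parameter mentioned in the commit message, and refscale reports the averaged per-operation reader overhead across its measurement runs.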
