| author | Paul E. McKenney <paulmck@kernel.org> | 2025-11-02 14:49:47 -0800 |
|---|---|---|
| committer | Frederic Weisbecker <frederic@kernel.org> | 2025-11-07 14:37:17 +0100 |
| commit | 204ab51445a72eab2b74165061282c868573f59c | |
| tree | 2a077d9a6d990f0fc415d0929d49ad333fa50e5a | |
| parent | 448b66a7aaf33cf52dc47dd7807652ce827e8dfd | |
refscale: Do not disable interrupts for tests involving local_bh_enable()
Some kernel configurations prohibit invoking local_bh_enable() while
interrupts are disabled. However, refscale disables interrupts to reduce
OS noise during the tests, which results in splats. This commit therefore
adds an ->enable_irqs flag to the ref_scale_ops structure, and refrains
from disabling interrupts when that flag is set. This flag is set for
the "bh" and "incpercpubh" scale_type module-parameter values.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
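
For context on the splat being fixed: some configurations (for example, lockdep-enabled builds) check in local_bh_enable()'s slow path that interrupts are enabled, so ending a bottom-half read-side section inside refscale's IRQ-disabled timing window triggers a warning. Below is a minimal illustrative sketch of the pre-patch pattern; it is not taken from this patch, and the function name is invented for illustration.

```c
/*
 * Illustrative sketch only (not from this patch): the pre-patch timing
 * pattern for the "bh" reader.  local_bh_enable() runs while interrupts
 * are still disabled, which some configurations (e.g. lockdep-enabled
 * builds) reject with a splat.
 */
static void timed_bh_reader_sketch(void)
{
	unsigned long flags;

	local_irq_save(flags);	/* reduce OS noise during the measurement */
	local_bh_disable();
	/* ... read-side work being benchmarked ... */
	local_bh_enable();	/* splat: invoked with interrupts disabled */
	local_irq_restore(flags);
}
```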
| -rw-r--r-- | kernel/rcu/refscale.c | 14 |
1 file changed, 10 insertions, 4 deletions
diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index 582f730632fc..613b0e0d2130 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -136,6 +136,7 @@ struct ref_scale_ops {
 	void (*cleanup)(void);
 	void (*readsection)(const int nloops);
 	void (*delaysection)(const int nloops, const int udl, const int ndl);
+	bool enable_irqs;
 	const char *name;
 };
 
@@ -488,6 +489,7 @@ static const struct ref_scale_ops incpercpubh_ops = {
 	.init = rcu_sync_scale_init,
 	.readsection = ref_incpercpubh_section,
 	.delaysection = ref_incpercpubh_delay_section,
+	.enable_irqs = true,
 	.name = "incpercpubh"
 };
 
@@ -865,6 +867,7 @@ static void ref_bh_delay_section(const int nloops, const int udl, const int ndl)
 static const struct ref_scale_ops bh_ops = {
 	.readsection = ref_bh_section,
 	.delaysection = ref_bh_delay_section,
+	.enable_irqs = true,
 	.name = "bh"
 };
 
@@ -1227,15 +1230,18 @@ repeat:
 	if (!atomic_dec_return(&n_warmedup))
 		while (atomic_read_acquire(&n_warmedup))
 			rcu_scale_one_reader();
-	// Also keep interrupts disabled. This also has the effect
-	// of preventing entries into slow path for rcu_read_unlock().
-	local_irq_save(flags);
+	// Also keep interrupts disabled when it is safe to do so, which
+	// it is not for local_bh_enable(). This also has the effect of
+	// preventing entries into slow path for rcu_read_unlock().
+	if (!cur_ops->enable_irqs)
+		local_irq_save(flags);
 	start = ktime_get_mono_fast_ns();
 	rcu_scale_one_reader();
 	duration = ktime_get_mono_fast_ns() - start;
-	local_irq_restore(flags);
+	if (!cur_ops->enable_irqs)
+		local_irq_restore(flags);
 	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
 
 	// To reduce runtime-skew noise, do maintain-load invocations until
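
Usage note (a hedged sketch, not part of the patch): any future ref_scale_ops instance whose read-side primitives must run with interrupts enabled would opt in the same way bh_ops and incpercpubh_ops do above. The names below are invented for illustration.

```c
/*
 * Hypothetical ops instance (names invented for illustration): its
 * read-side section ends with local_bh_enable(), so it sets
 * .enable_irqs to keep refscale from wrapping the timed section in
 * local_irq_save()/local_irq_restore().
 */
static const struct ref_scale_ops example_bh_like_ops = {
	.init = rcu_sync_scale_init,
	.readsection = example_bh_like_section,
	.delaysection = example_bh_like_delay_section,
	.enable_irqs = true,
	.name = "example_bh_like"
};
```

The affected readers continue to be selected at run time through the existing scale_type module parameter mentioned in the commit message (for example, scale_type=bh).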
