author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-07-10 14:00:14 -0700
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-08-30 16:03:37 -0700
commit    395a2f097ebdddf2bfa286b6119f1b231025c2f1
tree      6c4852de5a7125c22cbf0631bf288e64862b52af /kernel/rcu/tree.c
parent    4d232dfe1df35254298e7986c4de8c9f63f58c79
rcu: Define rcu_all_qs() only in !PREEMPT builds
Now that rcu_all_qs() is used only in !PREEMPT builds, move it to
tree_plugin.h so that it is defined only in those builds. This in turn
means that rcu_momentary_dyntick_idle() is only used in !PREEMPT builds,
but it is simply marked __maybe_unused in order to keep it near the rest
of the dyntick-idle code.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c | 41
1 file changed, 1 insertion(+), 40 deletions(-)
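The mechanics behind the commit message are worth spelling out: __maybe_unused
tells the compiler not to warn when a static function ends up with no callers
in a given configuration, which is exactly what happens to
rcu_momentary_dyntick_idle() once its only caller, rcu_all_qs(), moves into the
!PREEMPT-only tree_plugin.h. Below is a minimal standalone sketch of that
pattern, with illustrative names rather than the kernel's, and with the
attribute expanded directly so it compiles outside the kernel (in the kernel,
__maybe_unused comes from the compiler attribute headers and the real config
symbol is CONFIG_PREEMPT):

/* Standalone sketch; names are hypothetical, not the kernel's. */
#define __maybe_unused __attribute__((__unused__))

/*
 * Helper kept near related code even though some configurations never
 * call it. Without the attribute, a -DPREEMPT build would warn:
 * "'momentary_qs' defined but not used".
 */
static void __maybe_unused momentary_qs(void)
{
	/* heavy-weight quiescent-state work would go here */
}

#ifndef PREEMPT			/* stand-in for CONFIG_PREEMPT */
void all_qs(void)		/* only non-preemptible builds define the caller */
{
	momentary_qs();
}
#endif

int main(void)
{
#ifndef PREEMPT
	all_qs();
#endif
	return 0;
}

Building with -DPREEMPT compiles out all_qs(), leaving momentary_qs() with no
callers; the attribute is what keeps -Wunused-function quiet in that build, at
no cost in builds that do use the helper.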
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c8761e7c7c00..e140aaa78527 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -356,7 +356,7 @@ bool rcu_eqs_special_set(int cpu)
*
* The caller must have disabled interrupts and must not be idle.
*/
-static void rcu_momentary_dyntick_idle(void)
+static void __maybe_unused rcu_momentary_dyntick_idle(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
int special;
@@ -381,45 +381,6 @@ static int rcu_is_cpu_rrupt_from_idle(void)
__this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
}
-/*
- * Register an urgently needed quiescent state. If there is an
- * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
- * dyntick-idle quiescent state visible to other CPUs, which will in
- * some cases serve for expedited as well as normal grace periods.
- * Either way, register a lightweight quiescent state.
- *
- * The barrier() calls are redundant in the common case when this is
- * called externally, but just in case this is called from within this
- * file.
- *
- */
-void rcu_all_qs(void)
-{
- unsigned long flags;
-
- if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
- return;
- preempt_disable();
- /* Load rcu_urgent_qs before other flags. */
- if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
- preempt_enable();
- return;
- }
- this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
- barrier(); /* Avoid RCU read-side critical sections leaking down. */
- if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
- local_irq_save(flags);
- rcu_momentary_dyntick_idle();
- local_irq_restore(flags);
- }
- if (unlikely(raw_cpu_read(rcu_data.cpu_no_qs.b.exp)))
- rcu_qs();
- this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
- barrier(); /* Avoid RCU read-side critical sections leaking up. */
- preempt_enable();
-}
-EXPORT_SYMBOL_GPL(rcu_all_qs);
-
#define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
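For readers of the removed rcu_all_qs() above, its shape is a common lockless
fast-path idiom: a cheap racy read of a flag first, then an ordered re-check
before committing to any real work. Here is a standalone approximation using
C11 atomics, with hypothetical names (urgent_qs, do_heavy_work) standing in
for the kernel's per-CPU rcu_dynticks fields and
rcu_momentary_dyntick_idle():

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool urgent_qs;	/* hypothetical flag; per-CPU in the kernel */

static void do_heavy_work(void)
{
	/* stand-in for rcu_momentary_dyntick_idle() */
}

void all_qs_sketch(void)
{
	/*
	 * Fast path: plain racy read (the kernel uses raw_cpu_read()).
	 * Missing a just-set flag is harmless; it is noticed on a later call.
	 */
	if (!atomic_load_explicit(&urgent_qs, memory_order_relaxed))
		return;

	/*
	 * Slow path: re-check with acquire semantics (the kernel uses
	 * smp_load_acquire()) so the flag read is ordered before the
	 * store and work below. The kernel version additionally disables
	 * preemption here to pin the task to its per-CPU data.
	 */
	if (!atomic_load_explicit(&urgent_qs, memory_order_acquire))
		return;

	atomic_store_explicit(&urgent_qs, false, memory_order_relaxed);
	do_heavy_work();
}

int main(void)
{
	atomic_store(&urgent_qs, true);
	all_qs_sketch();
	return 0;
}

The sketch omits the kernel-only pieces (per-CPU variables,
preempt_disable(), and the barrier() calls that keep RCU read-side critical
sections from leaking across the function) but keeps the two-step check that
makes the common no-work case nearly free.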