Diffstat (limited to 'kernel/rcu/tree_exp.h')
-rw-r--r--  kernel/rcu/tree_exp.h  59
1 file changed, 9 insertions(+), 50 deletions(-)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index c36c7d5575ca..6058a734090c 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -141,6 +141,13 @@ static void __maybe_unused sync_exp_reset_tree(void)
raw_spin_lock_irqsave_rcu_node(rnp, flags);
WARN_ON_ONCE(rnp->expmask);
WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
+ /*
+ * Need to wait for any blocked tasks as well. Note that
+ * additional blocking tasks will also block the expedited GP
+ * until such time as the ->expmask bits are cleared.
+ */
+ if (rcu_is_leaf_node(rnp) && rcu_preempt_has_tasks(rnp))
+ WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
}
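The check added above is the same one removed from __sync_rcu_exp_select_node_cpus() in the next hunk, now additionally gated on rcu_is_leaf_node() since sync_exp_reset_tree() walks every rcu_node rather than only the leaves. For orientation, rcu_preempt_has_tasks() is a simple emptiness test on the leaf's ->blkd_tasks list; a sketch of the helper as it appears in kernel/rcu/tree_plugin.h (exact form may differ across kernel versions):

/* Sketch only: true iff at least one task is queued on this rcu_node as
 * blocked within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}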
@@ -393,13 +400,6 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
}
mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
- /*
- * Need to wait for any blocked tasks as well. Note that
- * additional blocking tasks will also block the expedited GP
- * until such time as the ->expmask bits are cleared.
- */
- if (rcu_preempt_has_tasks(rnp))
- WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
/* IPI the remaining CPUs for expedited quiescent state. */
@@ -751,12 +751,8 @@ static void rcu_exp_handler(void *unused)
struct task_struct *t = current;
/*
- * First, is there no need for a quiescent state from this CPU,
- * or is this CPU already looking for a quiescent state for the
- * current grace period? If either is the case, just leave.
- * However, this should not happen due to the preemptible
- * sync_sched_exp_online_cleanup() implementation being a no-op,
- * so warn if this does happen.
+ * WARN if the CPU is unexpectedly already looking for a
+ * QS or has already reported one.
*/
ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
if (WARN_ON_ONCE(!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
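The hunk boundary cuts this statement mid-expression; its full form mirrors the test inside the sync_sched_exp_online_cleanup() body removed further down, roughly:

	/* Sketch of the complete check (continuation truncated by the hunk):
	 * leave, warning once, if this CPU's bit is already clear in
	 * ->expmask or if the CPU is already flagged as owing an expedited
	 * quiescent state.
	 */
	if (WARN_ON_ONCE(!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
			 READ_ONCE(rdp->cpu_no_qs.b.exp)))
		return;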
@@ -803,11 +799,6 @@ static void rcu_exp_handler(void *unused)
WARN_ON_ONCE(1);
}
-/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
-static void sync_sched_exp_online_cleanup(int cpu)
-{
-}
-
/*
* Scan the current list of tasks blocked within RCU read-side critical
* sections, printing out the tid of each that is blocking the current
@@ -885,38 +876,6 @@ static void rcu_exp_handler(void *unused)
rcu_exp_need_qs();
}
-/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
-static void sync_sched_exp_online_cleanup(int cpu)
-{
- unsigned long flags;
- int my_cpu;
- struct rcu_data *rdp;
- int ret;
- struct rcu_node *rnp;
-
- rdp = per_cpu_ptr(&rcu_data, cpu);
- rnp = rdp->mynode;
- my_cpu = get_cpu();
- /* Quiescent state either not needed or already requested, leave. */
- if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
- READ_ONCE(rdp->cpu_no_qs.b.exp)) {
- put_cpu();
- return;
- }
- /* Quiescent state needed on current CPU, so set it up locally. */
- if (my_cpu == cpu) {
- local_irq_save(flags);
- rcu_exp_need_qs();
- local_irq_restore(flags);
- put_cpu();
- return;
- }
- /* Quiescent state needed on some other CPU, send IPI. */
- ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
- put_cpu();
- WARN_ON_ONCE(ret);
-}
-
/*
* Because preemptible RCU does not exist, we never have to check for
* tasks blocked within RCU read-side critical sections that are
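The trailing context above introduces the CONFIG_PREEMPT_RCU=n stub of rcu_print_task_exp_stall(), which this patch leaves untouched. As a sketch of what presumably follows the truncated comment (not part of any hunk):

/* Sketch only: with no preemptible RCU there can be no blocked readers,
 * so the stall-printing helper reports zero blocking tasks.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}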