author    Paul E. McKenney <paulmck@linux.ibm.com>    2018-11-29 09:15:54 -0800
committer Paul E. McKenney <paulmck@linux.ibm.com>    2019-01-25 15:28:27 -0800
commit    142d106d5e62ff2cf0dfd2dfe1adfcaff1c2ed85 (patch)
tree      346f24be4070b45272114351689d5f962f265f07 /kernel/rcu
parent    c46f497a6151d48cb341e18fdd4dff345f7d253d (diff)
rcu: Determine expedited-GP IPI handler at build time
Back when there could be multiple RCU flavors running in the same kernel at the same time, it was necessary to specify the expedited grace-period IPI handler at runtime. Now that there is only one RCU flavor, the IPI handler can be determined at build time. There is therefore no longer any reason for the RCU-preempt and RCU-sched IPI handlers to have different names, nor is there any reason to pass these handlers in function arguments and in the data structures enclosing workqueues. This commit therefore makes all these changes, pushing the specification of the expedited grace-period IPI handler down to the point of use.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
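To illustrate the pattern this commit applies, here is a minimal userspace sketch, not the kernel code; the names exp_work_before, exp_work_after, exp_handler and run_* are invented for illustration. A handler that used to be stored in the work descriptor and invoked through a function pointer becomes a fixed, file-local function called directly, so the pointer field and the handler arguments can be dropped.

#include <stdio.h>

typedef void (*call_func_t)(void *info);

/* Before: the handler travels with the work item as a function pointer. */
struct exp_work_before {
	call_func_t func;          /* filled in at runtime */
	unsigned long seq;
};

static void run_before(struct exp_work_before *w)
{
	w->func(NULL);             /* indirect call through the stored pointer */
}

/* After: exactly one handler exists, so its name is known at build time. */
static void exp_handler(void *unused)
{
	(void)unused;
	puts("expedited handler invoked");
}

struct exp_work_after {
	unsigned long seq;         /* the function-pointer field is gone */
};

static void run_after(struct exp_work_after *w)
{
	(void)w;
	exp_handler(NULL);         /* direct call at the point of use */
}

int main(void)
{
	struct exp_work_before before = { .func = exp_handler, .seq = 1 };
	struct exp_work_after after = { .seq = 2 };

	run_before(&before);
	run_after(&after);
	return 0;
}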
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tree.h      |  1
-rw-r--r--  kernel/rcu/tree_exp.h  | 30
2 files changed, 14 insertions, 17 deletions
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bcfd684a5c57..50bb41cdc5fb 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -36,7 +36,6 @@
/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
- smp_call_func_t rew_func;
unsigned long rew_s;
struct work_struct rew_work;
};
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 6d4eb4694b6f..7f5cb4228b59 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -22,6 +22,8 @@
#include <linux/lockdep.h>
+static void rcu_exp_handler(void *unused);
+
/*
* Record the start of an expedited grace period.
*/
@@ -344,7 +346,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
int cpu;
unsigned long flags;
- smp_call_func_t func;
unsigned long mask_ofl_test;
unsigned long mask_ofl_ipi;
int ret;
@@ -352,7 +353,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
container_of(wp, struct rcu_exp_work, rew_work);
struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
- func = rewp->rew_func;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
/* Each pass checks a CPU for identity, offline, and idle. */
@@ -396,7 +396,7 @@ retry_ipi:
mask_ofl_test |= mask;
continue;
}
- ret = smp_call_function_single(cpu, func, NULL, 0);
+ ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
if (!ret) {
mask_ofl_ipi &= ~mask;
continue;
@@ -426,7 +426,7 @@ retry_ipi:
* Select the nodes that the upcoming expedited grace period needs
* to wait for.
*/
-static void sync_rcu_exp_select_cpus(smp_call_func_t func)
+static void sync_rcu_exp_select_cpus(void)
{
int cpu;
struct rcu_node *rnp;
@@ -440,7 +440,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
rnp->exp_need_flush = false;
if (!READ_ONCE(rnp->expmask))
continue; /* Avoid early boot non-existent wq. */
- rnp->rew.rew_func = func;
if (!READ_ONCE(rcu_par_gp_wq) ||
rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
rcu_is_last_leaf_node(rnp)) {
@@ -580,10 +579,10 @@ static void rcu_exp_wait_wake(unsigned long s)
* Common code to drive an expedited grace period forward, used by
* workqueues and mid-boot-time tasks.
*/
-static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
+static void rcu_exp_sel_wait_wake(unsigned long s)
{
/* Initialize the rcu_node tree in preparation for the wait. */
- sync_rcu_exp_select_cpus(func);
+ sync_rcu_exp_select_cpus();
/* Wait and clean up, including waking everyone. */
rcu_exp_wait_wake(s);
@@ -597,14 +596,14 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
struct rcu_exp_work *rewp;
rewp = container_of(wp, struct rcu_exp_work, rew_work);
- rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
+ rcu_exp_sel_wait_wake(rewp->rew_s);
}
/*
* Given a smp_call_function() handler, kick off the specified
* implementation of expedited grace period.
*/
-static void _synchronize_rcu_expedited(smp_call_func_t func)
+static void _synchronize_rcu_expedited(void)
{
struct rcu_data *rdp;
struct rcu_exp_work rew;
@@ -625,10 +624,9 @@ static void _synchronize_rcu_expedited(smp_call_func_t func)
/* Ensure that load happens before action based on it. */
if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
/* Direct call during scheduler init and early_initcalls(). */
- rcu_exp_sel_wait_wake(func, s);
+ rcu_exp_sel_wait_wake(s);
} else {
/* Marshall arguments & schedule the expedited grace period. */
- rew.rew_func = func;
rew.rew_s = s;
INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
queue_work(rcu_gp_wq, &rew.rew_work);
@@ -654,7 +652,7 @@ static void _synchronize_rcu_expedited(smp_call_func_t func)
* ->expmask fields in the rcu_node tree. Otherwise, immediately
* report the quiescent state.
*/
-static void sync_rcu_exp_handler(void *unused)
+static void rcu_exp_handler(void *unused)
{
unsigned long flags;
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -760,14 +758,14 @@ void synchronize_rcu_expedited(void)
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return;
- _synchronize_rcu_expedited(sync_rcu_exp_handler);
+ _synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
#else /* #ifdef CONFIG_PREEMPT_RCU */
/* Invoked on each online non-idle CPU for expedited quiescent state. */
-static void sync_sched_exp_handler(void *unused)
+static void rcu_exp_handler(void *unused)
{
struct rcu_data *rdp;
struct rcu_node *rnp;
@@ -799,7 +797,7 @@ static void sync_sched_exp_online_cleanup(int cpu)
rnp = rdp->mynode;
if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
return;
- ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
+ ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
WARN_ON_ONCE(ret);
}
@@ -835,7 +833,7 @@ void synchronize_rcu_expedited(void)
if (rcu_blocking_is_gp())
return;
- _synchronize_rcu_expedited(sync_sched_exp_handler);
+ _synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
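The complementary piece of the change is how a single handler name can still have flavor-specific bodies. The sketch below, under the same caveats (userspace only, PREEMPT_FLAVOR standing in for CONFIG_PREEMPT_RCU, names invented), mirrors the rcu_exp_handler forward declaration added at the top of tree_exp.h and its two definitions in the CONFIG_PREEMPT_RCU branches: callers use one name, and the preprocessor selects the definition at build time.

#include <stdio.h>

/*
 * One forward-declared name; code compiled before either definition can
 * still call it, as sync_rcu_exp_select_node_cpus() now does in tree_exp.h.
 */
static void exp_handler(void *unused);

static void send_ipi(void)
{
	exp_handler(NULL);         /* same call site regardless of flavor */
}

#ifdef PREEMPT_FLAVOR
/* Variant built for the preemptible flavor. */
static void exp_handler(void *unused)
{
	(void)unused;
	puts("preemptible-flavor handler");
}
#else
/* Variant built otherwise; same name, different body. */
static void exp_handler(void *unused)
{
	(void)unused;
	puts("non-preemptible handler");
}
#endif

int main(void)
{
	send_ipi();
	return 0;
}

Compiling with or without -DPREEMPT_FLAVOR selects the handler body, analogous to how CONFIG_PREEMPT_RCU selects which rcu_exp_handler() definition is built.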