path: root/kernel/rcu/tree_plugin.h
author		Paul E. McKenney <paulmck@linux.ibm.com>	2019-04-12 15:58:34 -0700
committer	Paul E. McKenney <paulmck@linux.ibm.com>	2019-08-13 14:35:49 -0700
commit		ce5215c1342c6c89b3c3c45fea82cddf0b013787 (patch)
tree		06943157837341c47eadb469cd225f5966f4c9ed	/kernel/rcu/tree_plugin.h
parent		1bb5f9b95afe5d9d6b586389ce5e8f461a5b671c (diff)
rcu/nocb: Use separate flag to indicate offloaded ->cblist
RCU callback processing currently uses rcu_is_nocb_cpu() to determine
whether or not the current CPU's callbacks are to be offloaded. This works,
but it is not so good for cache locality. Plus use of ->cblist for offloaded
callbacks will greatly increase the frequency of these checks. This commit
therefore adds a ->offloaded flag to the rcu_segcblist structure to provide
a more flexible and cache-friendly means of checking for callback offloading.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
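[Editorial note: the pattern the commit message describes can be sketched in a
few lines of C. The sketch below is illustrative only: the structure is a
simplified stand-in for the kernel's rcu_segcblist (the real one is segmented
and lives in include/linux/rcu_segcblist.h), and the helper bodies are
assumptions about what the two helpers invoked in the diff below,
rcu_segcblist_offload() and rcu_segcblist_is_offloaded(), plausibly do.]

	#include <stdbool.h>

	/*
	 * Simplified stand-in for rcu_segcblist -- NOT the kernel's
	 * definition.  The key idea is that the offload state lives in
	 * the same structure as the callbacks themselves, which callback
	 * processing is already touching.
	 */
	struct segcblist_sketch {
		void *head;		/* queued callbacks (real list is segmented) */
		long len;		/* number of queued callbacks */
		bool enabled;		/* list currently accepting callbacks? */
		bool offloaded;		/* the flag this commit adds */
	};

	/* Plausible shape of rcu_segcblist_offload(): mark the list offloaded. */
	static inline void segcblist_sketch_offload(struct segcblist_sketch *rsclp)
	{
		rsclp->offloaded = true;
	}

	/* Plausible shape of rcu_segcblist_is_offloaded(): read the local flag. */
	static inline bool segcblist_sketch_is_offloaded(const struct segcblist_sketch *rsclp)
	{
		return rsclp->offloaded;
	}

The cache-locality argument then follows: rcu_is_nocb_cpu() tests the CPU
against a global cpumask on a separate cache line, whereas every call site
converted below already holds a pointer to the relevant rcu_data and its
embedded ->cblist, so reading the flag there is nearly free.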
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b8a43cf9bb4e..fc6133eed50a 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1382,7 +1382,7 @@ static void rcu_prepare_for_idle(void)
 	int tne;
 
 	lockdep_assert_irqs_disabled();
-	if (rcu_is_nocb_cpu(smp_processor_id()))
+	if (rcu_segcblist_is_offloaded(&rdp->cblist))
 		return;
 
 	/* Handle nohz enablement switches conservatively. */
@@ -1431,8 +1431,10 @@ static void rcu_prepare_for_idle(void)
  */
 static void rcu_cleanup_after_idle(void)
 {
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+
 	lockdep_assert_irqs_disabled();
-	if (rcu_is_nocb_cpu(smp_processor_id()))
+	if (rcu_segcblist_is_offloaded(&rdp->cblist))
 		return;
 	if (rcu_try_advance_all_cbs())
 		invoke_rcu_core();
@@ -1694,7 +1696,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 			    bool lazy, unsigned long flags)
 {
 
-	if (!rcu_is_nocb_cpu(rdp->cpu))
+	if (!rcu_segcblist_is_offloaded(&rdp->cblist))
 		return false;
 	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
@@ -1729,7 +1731,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
						      unsigned long flags)
 {
 	lockdep_assert_irqs_disabled();
-	if (!rcu_is_nocb_cpu(smp_processor_id()))
+	if (!rcu_segcblist_is_offloaded(&my_rdp->cblist))
 		return false; /* Not NOCBs CPU, caller must migrate CBs. */
 	__call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
 				rcu_segcblist_tail(&rdp->cblist),
@@ -2192,6 +2194,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 	}
 	rcu_segcblist_init(&rdp->cblist);
 	rcu_segcblist_disable(&rdp->cblist);
+	rcu_segcblist_offload(&rdp->cblist);
 	return true;
 }
 