author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2020-05-25 23:47:51 +0200
committer Paul E. McKenney <paulmck@kernel.org>                2020-06-29 11:59:25 -0700
commit    69f08d3999dbef1553a3332b8055282dd3893b6c
tree      bf82f8c7540248334a5d51cc192c10550f56fa87 /kernel/rcu
parent    952371d6fc0bc360d1d5780f86bb355836117ca2
rcu/tree: Use static initializer for krc.lock
The per-CPU variable krc is initialized at runtime in kfree_rcu_batch_init(). This function is invoked before 'rcu_scheduler_active' is set to 'RCU_SCHEDULER_RUNNING'. After the initialisation, '->initialized' is set to true. The raw_spin_lock is acquired only if '->initialized' is true. The workqueue item is used only once 'rcu_scheduler_active' is set to RCU_SCHEDULER_RUNNING, which happens after the initialisation.

Use a static initializer for krc.lock and remove the runtime initialisation of the lock. Since the lock can now always be acquired, remove the '->initialized' check.

Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
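For context, the idiom the patch adopts: a raw spinlock embedded in a per-CPU structure can be initialized at compile time with __RAW_SPIN_LOCK_UNLOCKED(), so the lock is in a valid unlocked state from the very first access and no boot-time raw_spin_lock_init() loop (or "already initialized?" flag) is needed. A minimal sketch of that pattern follows; the struct and function names (demo_pcpu, demo_lock_this_cpu, ...) are illustrative stand-ins, not taken from the patch:

	#include <linux/percpu.h>
	#include <linux/spinlock.h>

	struct demo_pcpu {
		raw_spinlock_t lock;
		unsigned long count;
	};

	/* Compile-time initializer: every CPU's copy of the lock starts
	 * out unlocked, so there is no window where locking is unsafe. */
	static DEFINE_PER_CPU(struct demo_pcpu, demo) = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(demo.lock),
	};

	static struct demo_pcpu *demo_lock_this_cpu(unsigned long *flags)
	{
		struct demo_pcpu *p;

		local_irq_save(*flags);	/* For safely calling this_cpu_ptr(). */
		p = this_cpu_ptr(&demo);
		raw_spin_lock(&p->lock);	/* Valid even before any init code ran. */
		return p;
	}

	static void demo_unlock_this_cpu(struct demo_pcpu *p, unsigned long flags)
	{
		raw_spin_unlock(&p->lock);
		local_irq_restore(flags);
	}

Besides removing the runtime initialisation pass, this lets the lock be taken unconditionally, dropping the '->initialized' branch from the lock/unlock fast paths.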
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tree.c | 13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 368bdc441ffb..a42a4693f161 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3002,7 +3002,7 @@ struct kfree_rcu_cpu_work {
* @lock: Synchronize access to this structure
* @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
* @monitor_todo: Tracks whether a @monitor_work delayed work is pending
- * @initialized: The @lock and @rcu_work fields have been initialized
+ * @initialized: The @rcu_work fields have been initialized
* @count: Number of objects for which GP not started
*
* This is a per-CPU structure. The reason that it is not included in
@@ -3022,7 +3022,9 @@ struct kfree_rcu_cpu {
int count;
};
-static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc);
+static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
+};
static __always_inline void
debug_rcu_bhead_unqueue(struct kfree_rcu_bulk_data *bhead)
@@ -3042,8 +3044,7 @@ krc_this_cpu_lock(unsigned long *flags)
local_irq_save(*flags); // For safely calling this_cpu_ptr().
krcp = this_cpu_ptr(&krc);
- if (likely(krcp->initialized))
- raw_spin_lock(&krcp->lock);
+ raw_spin_lock(&krcp->lock);
return krcp;
}
@@ -3051,8 +3052,7 @@ krc_this_cpu_lock(unsigned long *flags)
static inline void
krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
{
- if (likely(krcp->initialized))
- raw_spin_unlock(&krcp->lock);
+ raw_spin_unlock(&krcp->lock);
local_irq_restore(flags);
}
@@ -4278,7 +4278,6 @@ static void __init kfree_rcu_batch_init(void)
for_each_possible_cpu(cpu) {
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
- raw_spin_lock_init(&krcp->lock);
for (i = 0; i < KFREE_N_BATCHES; i++) {
INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
krcp->krw_arr[i].krcp = krcp;