Diffstat (limited to 'include/linux/rcutiny.h')
-rw-r--r--  include/linux/rcutiny.h  161
1 file changed, 85 insertions(+), 76 deletions(-)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e31005ee339e..f519cd680228 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright IBM Corporation, 2008
*
- * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
@@ -25,111 +12,133 @@
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H
-#include <linux/cache.h>
+#include <asm/param.h> /* for HZ */
-static inline void rcu_barrier_bh(void)
-{
- wait_rcu_gp(call_rcu_bh);
-}
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+};
-static inline void rcu_barrier_sched(void)
-{
- wait_rcu_gp(call_rcu_sched);
-}
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2
-static inline void synchronize_rcu_expedited(void)
+/*
+ * Are the two oldstate values the same? See the Tree RCU version for
+ * docbook header.
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
{
- synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
+ return rgosp1->rgos_norm == rgosp2->rgos_norm;
}
-static inline void rcu_barrier(void)
-{
- rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
-}
+unsigned long get_state_synchronize_rcu(void);
-static inline void synchronize_rcu_bh(void)
+static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
- synchronize_sched();
+ rgosp->rgos_norm = get_state_synchronize_rcu();
}
-static inline void synchronize_rcu_bh_expedited(void)
-{
- synchronize_sched();
-}
+unsigned long start_poll_synchronize_rcu(void);
-static inline void synchronize_sched_expedited(void)
+static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
- synchronize_sched();
+ rgosp->rgos_norm = start_poll_synchronize_rcu();
}
-static inline void kfree_call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu))
-{
- call_rcu(head, func);
-}
+bool poll_state_synchronize_rcu(unsigned long oldstate);
-static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
- *delta_jiffies = ULONG_MAX;
- return 0;
+ return poll_state_synchronize_rcu(rgosp->rgos_norm);
}
-static inline void rcu_note_context_switch(int cpu)
+static inline void cond_synchronize_rcu(unsigned long oldstate)
{
- rcu_sched_qs(cpu);
+ might_sleep();
}
-/*
- * Take advantage of the fact that there is only one CPU, which
- * allows us to ignore virtualization-based context switches.
- */
-static inline void rcu_virt_note_context_switch(int cpu)
+static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
+ cond_synchronize_rcu(rgosp->rgos_norm);
}
-/*
- * Return the number of grace periods.
- */
-static inline long rcu_batches_completed(void)
+static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
- return 0;
+ return start_poll_synchronize_rcu();
}
-/*
- * Return the number of bottom-half grace periods.
- */
-static inline long rcu_batches_completed_bh(void)
+static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
- return 0;
+ rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
}
-static inline void rcu_force_quiescent_state(void)
+static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
+ cond_synchronize_rcu(oldstate);
}
-static inline void rcu_bh_force_quiescent_state(void)
+static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
+ cond_synchronize_rcu_expedited(rgosp->rgos_norm);
}
-static inline void rcu_sched_force_quiescent_state(void)
-{
-}
+extern void rcu_barrier(void);
-static inline void rcu_cpu_stall_reset(void)
+static inline void synchronize_rcu_expedited(void)
{
+ synchronize_rcu();
}
-static inline void exit_rcu(void)
+void rcu_qs(void);
+
+static inline void rcu_softirq_qs(void)
{
+ rcu_qs();
}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern int rcu_scheduler_active __read_mostly;
-extern void rcu_scheduler_starting(void);
-#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-static inline void rcu_scheduler_starting(void)
+#define rcu_note_context_switch(preempt) \
+ do { \
+ rcu_qs(); \
+ rcu_tasks_qs(current, (preempt)); \
+ } while (0)
+
+static inline int rcu_needs_cpu(void)
{
+ return 0;
}
-#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
+
+/*
+ * Take advantage of the fact that there is only one CPU, which
+ * allows us to ignore virtualization-based context switches.
+ */
+static inline void rcu_virt_note_context_switch(void) { }
+static inline void rcu_cpu_stall_reset(void) { }
+static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
+static inline void rcu_irq_exit_check_preempt(void) { }
+static inline void exit_rcu(void) { }
+static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
+{
+ return false;
+}
+static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
+void rcu_scheduler_starting(void);
+static inline void rcu_end_inkernel_boot(void) { }
+static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
+static inline bool rcu_is_watching(void) { return true; }
+static inline void rcu_momentary_eqs(void) { }
+
+/* Avoid RCU read-side critical sections leaking across. */
+static inline void rcu_all_qs(void) { barrier(); }
+
+/* RCUtree hotplug events */
+#define rcutree_prepare_cpu NULL
+#define rcutree_online_cpu NULL
+#define rcutree_offline_cpu NULL
+#define rcutree_dead_cpu NULL
+#define rcutree_dying_cpu NULL
+static inline void rcutree_report_cpu_starting(unsigned int cpu) { }
#endif /* __LINUX_TINY_H */
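
For context, here is a minimal sketch (not part of the patch) of how a caller might exercise the polled grace-period interfaces that this version of rcutiny.h maps onto the single ->rgos_norm cookie. It assumes a kernel build context; the demo_* identifiers are hypothetical and chosen only for illustration.

/*
 * Sketch only: polled grace-period usage with the API declared above.
 * Kernel context assumed; the demo_* identifiers are hypothetical.
 */
#include <linux/rcupdate.h>

static unsigned long demo_cookie;
static struct rcu_gp_oldstate demo_full_cookie;

/* Snapshot the grace-period state and start a new grace period. */
static void demo_unpublish(void)
{
	/* ... remove an element from an RCU-protected structure ... */
	demo_cookie = start_poll_synchronize_rcu();
	start_poll_synchronize_rcu_full(&demo_full_cookie);
}

/* Reclaim later, once a full grace period has elapsed. */
static void demo_reclaim(void)
{
	if (poll_state_synchronize_rcu(demo_cookie) &&
	    poll_state_synchronize_rcu_full(&demo_full_cookie)) {
		/* ... free the element removed in demo_unpublish() ... */
		return;
	}
	/* Not yet: a sleepable caller may simply wait it out instead. */
	cond_synchronize_rcu(demo_cookie);
	/* ... free the element ... */
}

On Tiny RCU the expedited variants defined above simply fall back to the normal polled primitives (start_poll_synchronize_rcu_expedited() returns start_poll_synchronize_rcu(), and cond_synchronize_rcu_expedited() calls cond_synchronize_rcu()), so the same calling pattern covers both.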