Diffstat (limited to 'include/linux/smp.h')
-rw-r--r--  include/linux/smp.h | 176
1 file changed, 128 insertions(+), 48 deletions(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index a56f08ff3097..91d0ecf3b8d3 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -12,57 +12,108 @@
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
-#include <linux/llist.h>
+#include <linux/smp_types.h>
typedef void (*smp_call_func_t)(void *info);
+typedef bool (*smp_cond_func_t)(int cpu, void *info);
+
+/*
+ * structure shares (partial) layout with struct irq_work
+ */
struct __call_single_data {
- struct llist_node llist;
+ struct __call_single_node node;
smp_call_func_t func;
void *info;
- unsigned int flags;
};
+#define CSD_INIT(_func, _info) \
+ (struct __call_single_data){ .func = (_func), .info = (_info), }
+
/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
__aligned(sizeof(struct __call_single_data));
+#define INIT_CSD(_csd, _func, _info) \
+do { \
+ *(_csd) = CSD_INIT((_func), (_info)); \
+} while (0)
+
+/*
+ * Enqueue a llist_node on the call_single_queue; be very careful and read
+ * flush_smp_call_function_queue() in detail.
+ */
+extern void __smp_call_single_queue(int cpu, struct llist_node *node);
+
/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+ void *info, bool wait, const struct cpumask *mask);
+
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+
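As context for the helpers above, here is a minimal sketch of combining INIT_CSD() with smp_call_function_single_async(); the example_* names are hypothetical, and the csd must not be reused while a previous call is still in flight:

#include <linux/smp.h>
#include <linux/printk.h>

static call_single_data_t example_csd;		/* hypothetical per-caller csd */

static void example_remote_fn(void *info)
{
	/* runs on the target CPU in IPI context; keep it fast */
	pr_info("ran on CPU %d\n", smp_processor_id());
}

static int example_kick(int cpu)
{
	INIT_CSD(&example_csd, example_remote_fn, NULL);
	/* returns -EBUSY if example_csd is still in flight */
	return smp_call_function_single_async(cpu, &example_csd);
}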
/*
- * Call a function on all processors
+ * CPU-stopping functions used in panic. All have default weak definitions.
+ * Architecture-dependent code may override them.
*/
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
+void __noreturn panic_smp_self_stop(void);
+void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
+void crash_smp_send_stop(void);
/*
- * Call a function on processors specified by mask, which might include
- * the local one.
+ * Call a function on all processors
*/
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
- void *info, bool wait);
+static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+ on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
+}
+
+/**
+ * on_each_cpu_mask(): Run a function on processors specified by
+ * cpumask, which may include the local processor.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ * on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
+ */
+static inline void on_each_cpu_mask(const struct cpumask *mask,
+ smp_call_func_t func, void *info, bool wait)
+{
+ on_each_cpu_cond_mask(NULL, func, info, wait, mask);
+}
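As an illustration only (the flush helper below is invented), a typical caller passes a fast, non-blocking function plus a cpumask and waits for completion:

/* hypothetical callback: must be fast and non-blocking, runs with IRQs off */
static void example_flush_local(void *info)
{
}

static void example_flush_on(const struct cpumask *mask)
{
	/* run example_flush_local() on the online CPUs in @mask and wait */
	on_each_cpu_mask(mask, example_flush_local, NULL, true);
}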
/*
* Call a function on each processor for which the supplied function
* cond_func returns a positive value. This may include the local
- * processor.
+ * processor. May be used during early boot while early_boot_irqs_disabled is
+ * set. Use local_irq_save/restore() instead of local_irq_disable/enable().
*/
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
- smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags);
-
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
- smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags, const struct cpumask *mask);
+static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
+ smp_call_func_t func, void *info, bool wait)
+{
+ on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
+}
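A hedged sketch of the conditional variant; the predicate and work mask below are made up for the example, and only online CPUs for which the predicate returns true receive the IPI:

/* hypothetical predicate: skip CPUs whose bit is clear in the work mask */
static bool example_cpu_needs_work(int cpu, void *info)
{
	return cpumask_test_cpu(cpu, info);
}

static void example_do_work(void *info)
{
	/* per-CPU work; must be fast and non-blocking */
}

static void example_run_where_needed(struct cpumask *work_mask)
{
	on_each_cpu_cond(example_cpu_needs_work, example_do_work, work_mask, true);
}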
-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+/*
+ * Architecture-specific boot CPU setup. Defined as an empty weak function in
+ * init/main.c. Architectures can override it.
+ */
+void __init smp_prepare_boot_cpu(void);
#ifdef CONFIG_SMP
#include <linux/preempt.h>
-#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>
@@ -80,8 +131,15 @@ extern void smp_send_stop(void);
/*
* sends a 'reschedule' event to another CPU:
*/
-extern void smp_send_reschedule(int cpu);
-
+extern void arch_smp_send_reschedule(int cpu);
+/*
+ * scheduler_ipi() is inline, so it can't be passed as the callback reason, but
+ * the callsite IP should be sufficient for root-causing IPIs sent from here.
+ */
+#define smp_send_reschedule(cpu) ({ \
+ trace_ipi_send_cpu(cpu, _RET_IP_, NULL); \
+ arch_smp_send_reschedule(cpu); \
+})
/*
* Prepare machine for booting other CPUs.
@@ -101,7 +159,7 @@ extern void smp_cpus_done(unsigned int max_cpus);
/*
* Call a function on all other processors
*/
-int smp_call_function(smp_call_func_t func, void *info, int wait);
+void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait);
@@ -110,6 +168,7 @@ int smp_call_function_any(const struct cpumask *mask,
void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);
+bool cpus_peek_for_pending_ipi(const struct cpumask *mask);
/*
* Generic and arch helpers
@@ -119,12 +178,6 @@ void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
generic_smp_call_function_single_interrupt
-/*
- * Mark the boot cpu "online" so that it can call console drivers in
- * printk() and can access its per-cpu storage.
- */
-void smp_prepare_boot_cpu(void);
-
extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);
@@ -144,15 +197,13 @@ static inline void smp_send_stop(void) { }
* These macros fold the SMP functionality into a single CPU system
*/
#define raw_smp_processor_id() 0
-static inline int up_smp_call_function(smp_call_func_t func, void *info)
+static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
- return 0;
}
#define smp_call_function(func, info, wait) \
(up_smp_call_function(func, info))
static inline void smp_send_reschedule(int cpu) { }
-#define smp_prepare_boot_cpu() do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
(up_smp_call_function(func, info))
static inline void call_function_init(void) { }
@@ -166,10 +217,16 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }
+static inline bool cpus_peek_for_pending_ipi(const struct cpumask *mask)
+{
+ return false;
+}
+
+#define setup_max_cpus 0
#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
-static inline void smp_init(void) { up_late_init(); }
+static __always_inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif
@@ -181,29 +238,46 @@ static inline int get_boot_cpu_id(void)
#endif /* !SMP */
-/*
- * smp_processor_id(): get the current CPU ID.
+/**
+ * raw_smp_processor_id() - get the current (unstable) CPU id
*
- * if DEBUG_PREEMPT is enabled then we check whether it is
- * used in a preemption-safe way. (smp_processor_id() is safe
- * if it's used in a preemption-off critical section, or in
- * a thread that is bound to the current CPU.)
+ * For when you know what you are doing and need an unstable
+ * CPU id.
+ */
+
+/**
+ * smp_processor_id() - get the current (stable) CPU id
+ *
+ * This is the normal accessor to the CPU id and should be used
+ * whenever possible.
+ *
+ * The CPU id is stable when:
*
- * NOTE: raw_smp_processor_id() is for internal use only
- * (smp_processor_id() is the preferred variant), but in rare
- * instances it might also be used to turn off false positives
- * (i.e. smp_processor_id() use that the debugging code reports but
- * which use for some reason is legal). Don't use this to hack around
- * the warning message, as your code might not work under PREEMPT.
+ * - IRQs are disabled;
+ * - preemption is disabled;
+ * - the task is CPU affine.
+ *
+ * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions and WARN
+ * when smp_processor_id() is used while the CPU id is not stable.
+ */
+
+/*
+ * Allow the architecture to differentiate between a stable and unstable read.
+ * For example, x86 uses an IRQ-safe asm-volatile read for the unstable case
+ * but a regular asm read for the stable case.
*/
+#ifndef __smp_processor_id
+#define __smp_processor_id() raw_smp_processor_id()
+#endif
+
#ifdef CONFIG_DEBUG_PREEMPT
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
-# define smp_processor_id() raw_smp_processor_id()
+# define smp_processor_id() __smp_processor_id()
#endif
-#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+#define get_cpu() ({ preempt_disable(); __smp_processor_id(); })
#define put_cpu() preempt_enable()
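To make the stable/unstable distinction concrete, a small hypothetical example: get_cpu()/put_cpu() keep the id stable by disabling preemption, while raw_smp_processor_id() only yields a hint that may already be stale:

static void example_use_cpu_id(void)
{
	int cpu = get_cpu();	/* preemption off: 'cpu' is stable here */

	/* ... safely touch this CPU's per-cpu data via 'cpu' ... */
	put_cpu();		/* preemption back on */

	/* outside the pinned section, only a possibly stale value exists */
	pr_debug("last seen on CPU %d\n", raw_smp_processor_id());
}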
/*
@@ -212,8 +286,8 @@ static inline int get_boot_cpu_id(void)
*/
extern void arch_disable_smp_support(void);
-extern void arch_enable_nonboot_cpus_begin(void);
-extern void arch_enable_nonboot_cpus_end(void);
+extern void arch_thaw_secondary_cpus_begin(void);
+extern void arch_thaw_secondary_cpus_end(void);
void smp_setup_processor_id(void);
@@ -225,4 +299,10 @@ int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+bool csd_lock_is_stuck(void);
+#else
+static inline bool csd_lock_is_stuck(void) { return false; }
+#endif
+
#endif /* __LINUX_SMP_H */
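Finally, a hedged sketch of how a diagnostic path might consult csd_lock_is_stuck(); the reporting function is hypothetical and not part of this header:

#include <linux/printk.h>
#include <linux/smp.h>

static void example_report_cpu_stall(int cpu)
{
	/*
	 * A CSD lock that CONFIG_CSD_LOCK_WAIT_DEBUG already flagged as
	 * stuck can itself explain a stall, so mention it in the report.
	 */
	if (csd_lock_is_stuck())
		pr_warn("CPU %d stall may be due to a stuck csd-lock\n", cpu);
}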