Diffstat (limited to 'kernel/smp.c')
-rw-r--r--   kernel/smp.c   108
1 file changed, 61 insertions(+), 47 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 27dc31a146a3..f349960f79ca 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -86,13 +86,15 @@ int smpcfd_dead_cpu(unsigned int cpu)
int smpcfd_dying_cpu(unsigned int cpu)
{
/*
- * The IPIs for the smp-call-function callbacks queued by other
- * CPUs might arrive late, either due to hardware latencies or
- * because this CPU disabled interrupts (inside stop-machine)
- * before the IPIs were sent. So flush out any pending callbacks
- * explicitly (without waiting for the IPIs to arrive), to
- * ensure that the outgoing CPU doesn't go offline with work
- * still pending.
+ * The IPIs for the smp-call-function callbacks queued by other CPUs
+ * might arrive late, either due to hardware latencies or because this
+ * CPU disabled interrupts (inside stop-machine) before the IPIs were
+ * sent. So flush out any pending callbacks explicitly (without waiting
+ * for the IPIs to arrive), to ensure that the outgoing CPU doesn't go
+ * offline with work still pending.
+ *
+ * This runs with interrupts disabled inside the stopper task invoked by
+ * stop_machine(), ensuring mutually exclusive CPU offlining and IPI flush.
*/
__flush_smp_call_function_queue(false);
irq_work_run();
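The flush described in the comment amounts to draining a per-CPU lock-less list directly on the dying CPU instead of waiting for the IPI that would normally trigger it. A minimal sketch of that idea, using hypothetical names and a simplified callback record (the real queue holds call_single_data_t and distinguishes sync from async entries):

#include <linux/llist.h>
#include <linux/smp.h>

/* Hypothetical callback record, standing in for call_single_data_t. */
struct example_pending_cb {
	struct llist_node node;
	smp_call_func_t func;
	void *info;
};

/*
 * Drain everything queued on @queue right now, without waiting for the IPI
 * that would normally trigger the flush.
 */
static void example_flush_pending(struct llist_head *queue)
{
	struct llist_node *entry = llist_del_all(queue);
	struct example_pending_cb *cb, *tmp;

	llist_for_each_entry_safe(cb, tmp, entry, node)
		cb->func(cb->info);
}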
@@ -170,9 +172,9 @@ static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);
static ulong csd_lock_timeout = 5000; /* CSD lock timeout in milliseconds. */
-module_param(csd_lock_timeout, ulong, 0444);
+module_param(csd_lock_timeout, ulong, 0644);
static int panic_on_ipistall; /* CSD panic timeout in milliseconds, 300000 for five minutes. */
-module_param(panic_on_ipistall, int, 0444);
+module_param(panic_on_ipistall, int, 0644);
static atomic_t csd_bug_count = ATOMIC_INIT(0);
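For context, switching the mode from 0444 to 0644 makes these parameters root-writable through sysfs, so they can be tuned at runtime rather than only on the kernel command line. A minimal, hypothetical sketch of the pattern (names are illustrative, not from this file):

#include <linux/moduleparam.h>

/*
 * Mode 0644: world-readable, root-writable via /sys/module/.../parameters/,
 * i.e. tunable at runtime rather than only at boot.
 */
static ulong example_timeout_ms = 5000;
module_param(example_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(example_timeout_ms, "Illustrative runtime-tunable timeout (ms)");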
@@ -418,6 +420,10 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
*/
static int generic_exec_single(int cpu, call_single_data_t *csd)
{
+ /*
+ * Preemption already disabled here so stopper cannot run on this CPU,
+ * ensuring mutually exclusive CPU offlining and last IPI flush.
+ */
if (cpu == smp_processor_id()) {
smp_call_func_t func = csd->func;
void *info = csd->info;
@@ -638,8 +644,10 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
int err;
/*
- * prevent preemption and reschedule on another processor,
- * as well as CPU removal
+ * Prevent preemption and reschedule on another CPU, as well as CPU
+	 * removal. This prevents the stopper from running on this CPU, thus
+	 * providing mutual exclusion between the below cpu_online() check and
+	 * IPI sending, ensuring IPIs are not missed by a CPU going offline.
*/
this_cpu = get_cpu();
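A minimal sketch of the pattern this comment describes, with hypothetical names: pin the caller with get_cpu() so the stopper (and hence CPU offlining) cannot run on this CPU between the online check and the IPI.

#include <linux/printk.h>
#include <linux/smp.h>

/* Runs in IPI context on the target CPU; must be fast and non-blocking. */
static void example_cb(void *info)
{
	pr_info("example_cb on CPU %d\n", smp_processor_id());
}

static int example_call_on(int target)
{
	int err;

	get_cpu();	/* disable preemption: the stopper cannot run here */
	err = smp_call_function_single(target, example_cb, NULL, 1);
	put_cpu();	/* re-enable preemption */

	return err;
}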
@@ -741,32 +749,19 @@ EXPORT_SYMBOL_GPL(smp_call_function_single_async);
*
* Selection preference:
* 1) current cpu if in @mask
- * 2) any cpu of current node if in @mask
- * 3) any other online cpu in @mask
+ * 2) nearest cpu in @mask, based on NUMA topology
*/
int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait)
{
unsigned int cpu;
- const struct cpumask *nodemask;
int ret;
/* Try for same CPU (cheapest) */
cpu = get_cpu();
- if (cpumask_test_cpu(cpu, mask))
- goto call;
-
- /* Try for same node. */
- nodemask = cpumask_of_node(cpu_to_node(cpu));
- for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
- cpu = cpumask_next_and(cpu, nodemask, mask)) {
- if (cpu_online(cpu))
- goto call;
- }
+ if (!cpumask_test_cpu(cpu, mask))
+ cpu = sched_numa_find_nth_cpu(mask, 0, cpu_to_node(cpu));
- /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
- cpu = cpumask_any_and(mask, cpu_online_mask);
-call:
ret = smp_call_function_single(cpu, func, info, wait);
put_cpu();
return ret;
@@ -792,7 +787,6 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
bool wait = scf_flags & SCF_WAIT;
int nr_cpus = 0;
bool run_remote = false;
- bool run_local = false;
lockdep_assert_preemption_disabled();
@@ -814,18 +808,8 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
*/
WARN_ON_ONCE(!in_task());
- /* Check if we need local execution. */
- if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
- run_local = true;
-
/* Check if we need remote execution, i.e., any CPU excluding this one. */
- cpu = cpumask_first_and(mask, cpu_online_mask);
- if (cpu == this_cpu)
- cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
- if (cpu < nr_cpu_ids)
- run_remote = true;
-
- if (run_remote) {
+ if (cpumask_any_and_but(mask, cpu_online_mask, this_cpu) < nr_cpu_ids) {
cfd = this_cpu_ptr(&cfd_data);
cpumask_and(cfd->cpumask, mask, cpu_online_mask);
__cpumask_clear_cpu(this_cpu, cfd->cpumask);
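cpumask_any_and_but(), used above, folds the removed three-line search into one helper: pick any CPU present in both masks while excluding a given CPU, with a return value of nr_cpu_ids or more meaning none was found. An open-coded sketch of roughly equivalent logic, for illustration only:

#include <linux/cpumask.h>

static unsigned int example_any_and_but(const struct cpumask *mask1,
					const struct cpumask *mask2,
					unsigned int skip)
{
	unsigned int cpu;

	cpu = cpumask_first_and(mask1, mask2);
	if (cpu == skip)
		cpu = cpumask_next_and(cpu, mask1, mask2);

	return cpu;	/* >= nr_cpu_ids when no such CPU exists */
}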
@@ -839,6 +823,9 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
continue;
}
+ /* Work is enqueued on a remote CPU. */
+ run_remote = true;
+
csd_lock(csd);
if (wait)
csd->node.u_flags |= CSD_TYPE_SYNC;
@@ -850,6 +837,10 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
#endif
trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
+ /*
+ * Kick the remote CPU if this is the first work
+ * item enqueued.
+ */
if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
nr_cpus++;
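The new comment relies on llist_add() returning true only when the list was empty beforehand, which is exactly the "first work item enqueued, so an IPI is needed" condition. A minimal sketch of that idiom, with hypothetical names:

#include <linux/llist.h>

/*
 * Enqueue @node on @head and tell the caller whether the consumer must be
 * kicked: llist_add() returns true only when the list was empty beforehand.
 */
static bool example_enqueue_needs_kick(struct llist_node *node,
				       struct llist_head *head)
{
	return llist_add(node, head);
}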
@@ -868,7 +859,9 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
send_call_function_ipi_mask(cfd->cpumask_ipi);
}
- if (run_local && (!cond_func || cond_func(this_cpu, info))) {
+ /* Check if we need local execution. */
+ if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
+ (!cond_func || cond_func(this_cpu, info))) {
unsigned long flags;
local_irq_save(flags);
@@ -891,16 +884,15 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
* @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
- * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
- * (atomically) until function has completed on other CPUs. If
- * %SCF_RUN_LOCAL is set, the function will also be run locally
- * if the local CPU is set in the @cpumask.
- *
- * If @wait is true, then returns once @func has returned.
+ * @wait: If true, wait (atomically) until function has completed
+ * on other CPUs.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler. Preemption
* must be disabled when calling this function.
+ *
+ * @func is not called on the local CPU even if @mask contains it. Consider
+ * using on_each_cpu_cond_mask() instead if this is not desirable.
*/
void smp_call_function_many(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait)
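As the updated kernel-doc notes, the local CPU is always skipped here; callers that also need local execution typically use on_each_cpu_mask() or on_each_cpu_cond_mask() instead. A hedged sketch contrasting the two, with hypothetical names:

#include <linux/atomic.h>
#include <linux/smp.h>

static void example_bump(void *info)
{
	atomic_inc(info);
}

static void example_run(const struct cpumask *mask, atomic_t *ctr)
{
	/* Remote CPUs in @mask only; the calling CPU is skipped even if set. */
	preempt_disable();
	smp_call_function_many(mask, example_bump, ctr, true);
	preempt_enable();

	/* Remote CPUs in @mask plus the calling CPU, if it is set in @mask. */
	on_each_cpu_mask(mask, example_bump, ctr, true);
}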
@@ -1025,7 +1017,7 @@ void __init smp_init(void)
* @cond_func: A callback function that is passed a cpu id and
* the info parameter. The function is called
* with preemption disabled. The function should
- * return a blooean value indicating whether to IPI
+ * return a boolean value indicating whether to IPI
* the specified CPU.
* @func: The function to run on all applicable CPUs.
* This must be fast and non-blocking.
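An illustrative cond_func for the interface documented above: only CPUs whose per-CPU flag is set receive the IPI. All names here are hypothetical:

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(bool, example_needs_flush);

/* Called with preemption disabled; decides per CPU whether an IPI is sent. */
static bool example_cond(int cpu, void *info)
{
	return per_cpu(example_needs_flush, cpu);
}

static void example_do_flush(void *info)
{
	this_cpu_write(example_needs_flush, false);
	/* ... the actual per-CPU work would go here ... */
}

static void example_flush_marked_cpus(void)
{
	on_each_cpu_cond_mask(example_cond, example_do_flush, NULL, true,
			      cpu_online_mask);
}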
@@ -1096,6 +1088,28 @@ void wake_up_all_idle_cpus(void)
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
/**
+ * cpus_peek_for_pending_ipi - Check for pending IPIs on a set of CPUs
+ * @mask: The CPU mask for the CPUs to check.
+ *
+ * This function walks @mask and checks whether any of those CPUs have a
+ * pending IPI scheduled. It does not guarantee correctness, as it only
+ * provides a racy snapshot.
+ *
+ * Returns true if there is a pending IPI scheduled and false otherwise.
+ */
+bool cpus_peek_for_pending_ipi(const struct cpumask *mask)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, mask) {
+ if (!llist_empty(per_cpu_ptr(&call_single_queue, cpu)))
+ return true;
+ }
+
+ return false;
+}
+
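A hedged usage sketch for the helper added above, illustrating the kind of racy-snapshot decision it is meant for; the policy and caller name are hypothetical, not part of this patch:

/*
 * Skip an explicit wakeup when some CPU in @mask already has
 * smp-call-function work pending. The check is only a snapshot, so this is
 * an optimisation, not a guarantee.
 */
static void example_maybe_kick(const struct cpumask *mask)
{
	if (!cpus_peek_for_pending_ipi(mask))
		wake_up_all_idle_cpus();
}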
+/**
* struct smp_call_on_cpu_struct - Call a function on a specific CPU
* @work: &work_struct
* @done: &completion to signal