-rw-r--r--   kernel/sched/core.c   18
-rw-r--r--   kernel/sched/smp.h     2
-rw-r--r--   kernel/smp.c          49
3 files changed, 53 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c26a2cd99ec7..b0a48cfc0a22 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3829,16 +3829,20 @@ void sched_ttwu_pending(void *arg)
 	rq_unlock_irqrestore(rq, &rf);
 }
 
-void send_call_function_single_ipi(int cpu)
+/*
+ * Prepare the scene for sending an IPI for a remote smp_call
+ *
+ * Returns true if the caller can proceed with sending the IPI.
+ * Returns false otherwise.
+ */
+bool call_function_single_prep_ipi(int cpu)
 {
-	struct rq *rq = cpu_rq(cpu);
-
-	if (!set_nr_if_polling(rq->idle)) {
-		trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, NULL);
-		arch_send_call_function_single_ipi(cpu);
-	} else {
+	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
 		trace_sched_wake_idle_without_ipi(cpu);
+		return false;
 	}
+
+	return true;
 }
 
 /*
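The refactor above leans on set_nr_if_polling(): if the destination CPU's idle task is polling its thread flags (TIF_POLLING_NRFLAG), setting TIF_NEED_RESCHED is enough to make it break out of the idle loop, so the IPI can be skipped. Below is a minimal user-space model of that fast path using C11 atomics; the single flags word and the POLLING/NEED_RESCHED bits are illustrative stand-ins for the kernel's thread_info flags, not the real implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define POLLING       0x1   /* models TIF_POLLING_NRFLAG */
#define NEED_RESCHED  0x2   /* models TIF_NEED_RESCHED */

/* Thread flags of the remote CPU's idle task (hypothetical stand-in). */
static _Atomic unsigned int idle_flags = POLLING;

/*
 * Model of set_nr_if_polling(): returns true if the remote idle task is
 * polling and NEED_RESCHED is (now) set, so the caller can skip the IPI;
 * returns false if a real IPI is required.
 */
static bool set_nr_if_polling(void)
{
        unsigned int val = atomic_load(&idle_flags);

        for (;;) {
                if (!(val & POLLING))
                        return false;   /* not polling: must send an IPI */
                if (val & NEED_RESCHED)
                        return true;    /* already set: nothing to do */
                /* On failure, val is reloaded with the current flags. */
                if (atomic_compare_exchange_weak(&idle_flags, &val,
                                                 val | NEED_RESCHED))
                        return true;    /* polling CPU will notice the bit */
        }
}

int main(void)
{
        puts(set_nr_if_polling() ? "skipped the IPI"
                                 : "would send an IPI");
        return 0;
}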
diff --git a/kernel/sched/smp.h b/kernel/sched/smp.h
index 2eb23dd0f285..21ac44428bb0 100644
--- a/kernel/sched/smp.h
+++ b/kernel/sched/smp.h
@@ -6,7 +6,7 @@
 
 extern void sched_ttwu_pending(void *arg);
 
-extern void send_call_function_single_ipi(int cpu);
+extern bool call_function_single_prep_ipi(int cpu);
 
 #ifdef CONFIG_SMP
 extern void flush_smp_call_function_queue(void);
diff --git a/kernel/smp.c b/kernel/smp.c
index 6bbfabbe62fc..37e9613a0889 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -104,9 +104,18 @@ void __init call_function_init(void)
 }
 
 static __always_inline void
-send_call_function_ipi_mask(struct cpumask *mask)
+send_call_function_single_ipi(int cpu, smp_call_func_t func)
 {
-	trace_ipi_send_cpumask(mask, _RET_IP_, NULL);
+	if (call_function_single_prep_ipi(cpu)) {
+		trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, func);
+		arch_send_call_function_single_ipi(cpu);
+	}
+}
+
+static __always_inline void
+send_call_function_ipi_mask(struct cpumask *mask, smp_call_func_t func)
+{
+	trace_ipi_send_cpumask(mask, _RET_IP_, func);
 	arch_send_call_function_ipi_mask(mask);
 }
 
@@ -307,9 +316,8 @@ static __always_inline void csd_unlock(struct __call_single_data *csd)
 	smp_store_release(&csd->node.u_flags, 0);
 }
 
-static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
-
-void __smp_call_single_queue(int cpu, struct llist_node *node)
+static __always_inline void
+raw_smp_call_single_queue(int cpu, struct llist_node *node, smp_call_func_t func)
 {
 	/*
 	 * The list addition should be visible to the target CPU when it pops
@@ -324,7 +332,32 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
 	 * equipped to do the right thing...
 	 */
 	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
-		send_call_function_single_ipi(cpu);
+		send_call_function_single_ipi(cpu, func);
+}
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
+
+void __smp_call_single_queue(int cpu, struct llist_node *node)
+{
+	/*
+	 * We have to check the type of the CSD before queueing it, because
+	 * once queued it can have its flags cleared by
+	 *   flush_smp_call_function_queue()
+	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
+	 * executes migration_cpu_stop() on the remote CPU).
+	 */
+	if (trace_ipi_send_cpumask_enabled()) {
+		call_single_data_t *csd;
+		smp_call_func_t func;
+
+		csd = container_of(node, call_single_data_t, node.llist);
+		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
+			sched_ttwu_pending : csd->func;
+
+		raw_smp_call_single_queue(cpu, node, func);
+	} else {
+		raw_smp_call_single_queue(cpu, node, NULL);
+	}
 }
 
 /*
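In the new __smp_call_single_queue() above, the callback is recovered from the llist node being queued with container_of(), which subtracts the member's offset to get back to the enclosing CSD. The stand-alone sketch below demonstrates that pointer arithmetic; the trimmed-down call_single_data_t is a hypothetical stand-in for the kernel type, kept only to make the example self-contained.

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the enclosing
 * struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct llist_node { struct llist_node *next; };

typedef void (*smp_call_func_t)(void *info);

/* Hypothetical, trimmed-down stand-in for call_single_data_t. */
typedef struct {
        struct {
                struct llist_node llist;
                unsigned int u_flags;
        } node;
        smp_call_func_t func;
        void *info;
} call_single_data_t;

static void demo_func(void *info)
{
        printf("callback ran with \"%s\"\n", (const char *)info);
}

int main(void)
{
        call_single_data_t csd = { .func = demo_func, .info = "hello" };
        struct llist_node *node = &csd.node.llist;

        /* The recovery step __smp_call_single_queue() performs: from the
         * node that would be queued back to the CSD embedding it. */
        call_single_data_t *back =
                container_of(node, call_single_data_t, node.llist);
        back->func(back->info);
        return 0;
}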
@@ -768,9 +801,9 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 		 * provided mask.
 		 */
 		if (nr_cpus == 1)
-			send_call_function_single_ipi(last_cpu);
+			send_call_function_single_ipi(last_cpu, func);
 		else if (likely(nr_cpus > 1))
-			send_call_function_ipi_mask(cfd->cpumask_ipi);
+			send_call_function_ipi_mask(cfd->cpumask_ipi, func);
 	}
 
 	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
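The trace_ipi_send_cpumask_enabled() test in __smp_call_single_queue() above exists so that the CSD dereference and the TTWU special-casing only happen when the tracepoint is actually live; in the kernel that check is backed by a static key, so the disabled path costs little more than a patched jump. A rough user-space sketch of the same pattern follows, with a plain bool standing in for the static key and hypothetical helper names throughout.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Plain-bool stand-in for the static-key-backed trace_*_enabled() check. */
static bool trace_enabled;

typedef void (*smp_call_func_t)(void *info);

static void sample_callback(void *info) { (void)info; }

/* Models resolving `func` from the CSD: work worth doing only when
 * somebody is actually tracing. */
static smp_call_func_t resolve_traced_func(void)
{
        puts("resolving callback for the tracepoint");
        return sample_callback;
}

/* Models raw_smp_call_single_queue(): the callback is carried purely
 * as tracepoint payload. */
static void queue_and_maybe_trace(smp_call_func_t func)
{
        if (func)
                puts("queued; tracepoint records the callback");
        else
                puts("queued; no trace payload");
}

int main(void)
{
        trace_enabled = false;
        queue_and_maybe_trace(trace_enabled ? resolve_traced_func() : NULL);

        trace_enabled = true;
        queue_and_maybe_trace(trace_enabled ? resolve_traced_func() : NULL);
        return 0;
}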