author    Thomas Gleixner <tglx@linutronix.de>    2013-03-06 11:18:36 +0000
committer Thomas Gleixner <tglx@linutronix.de>    2013-03-13 11:39:39 +0100
commit    eaa907c546f76222227dfc41784b22588af1e3d7
tree      0e23882c311bc527ae9e328c22b000ff11b85fae
parent    989dcb645ca715129c5a2b39102c8334a20d9615
tick: Provide a check for a forced broadcast pending
On the CPU which gets woken along with the target CPU of the broadcast
the following happens:

  deep_idle()
                        <-- spurious wakeup
  broadcast_exit()
    set forced bit

  enable interrupts
                        <-- Nothing happens

  disable interrupts

  broadcast_enter()
                        <-- Here we observe the forced bit is set
  deep_idle()

Now after that the target CPU of the broadcast runs the broadcast
handler and finds the other CPU in both the broadcast and the forced
mask, sends the IPI and stuff gets back to normal.

So it's not actually harmful, just more evidence for the theory that
hardware designers have access to very special drug supplies.

Now there is no point in going back to deep idle just to wake up again
right away via an IPI. Provide a check which allows the idle code to
avoid the deep idle transition.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: LAK <linux-arm-kernel@lists.infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Arjan van de Veen <arjan@infradead.org>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Cc: Jason Liu <liu.h.jason@gmail.com>
Link: http://lkml.kernel.org/r/20130306111537.565418308@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
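For context, a minimal sketch (not part of this patch) of how an architecture's idle path could consume the new check; enter_shallow_idle() and enter_deep_idle() are hypothetical placeholders for whatever state selection the platform actually does:

#include <linux/clockchips.h>
#include <linux/irqflags.h>

/* Hypothetical idle path, for illustration only */
static void example_idle(void)
{
	local_irq_disable();

	if (tick_check_broadcast_expired()) {
		/*
		 * The broadcast IPI for this CPU is already pending, so a
		 * deep idle transition would be woken again right away.
		 * Fall back to a shallow idle state instead.
		 */
		enter_shallow_idle();	/* hypothetical helper */
	} else {
		enter_deep_idle();	/* hypothetical helper */
	}

	local_irq_enable();
}

As the comment added in tick-broadcast.c states, the check must be made with interrupts disabled; it only tests whether this CPU is set in tick_broadcast_force_mask.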
-rw-r--r--  include/linux/clockchips.h     6
-rw-r--r--  kernel/time/tick-broadcast.c  12
2 files changed, 18 insertions(+), 0 deletions(-)
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 494d33ea78f8..646aac136eed 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -175,6 +175,12 @@ extern void tick_broadcast(const struct cpumask *mask);
extern int tick_receive_broadcast(void);
#endif
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern int tick_check_broadcast_expired(void);
+#else
+static inline int tick_check_broadcast_expired(void) { return 0; }
+#endif
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void clockevents_notify(unsigned long reason, void *arg);
#else
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 2100aad6b5f2..d76d816afc5d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -404,6 +404,18 @@ struct cpumask *tick_get_broadcast_oneshot_mask(void)
}
/*
+ * Called before going idle with interrupts disabled. Checks whether a
+ * broadcast event from the other core is about to happen. We detected
+ * that in tick_broadcast_oneshot_control(). The callsite can use this
+ * to avoid a deep idle transition as we are about to get the
+ * broadcast IPI right away.
+ */
+int tick_check_broadcast_expired(void)
+{
+ return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
+}
+
+/*
* Set broadcast interrupt affinity
*/
static void tick_broadcast_set_affinity(struct clock_event_device *bc,