author    Peter Zijlstra <peterz@infradead.org>    2016-12-15 13:35:52 +0100
committer Ingo Molnar <mingo@kernel.org>           2017-01-14 11:29:59 +0100
commit    9881b024b7d7671f6a014091bc96506b89081802 (patch)
tree      83319d1a51c4e7a880c329aaca38d9833dc8d4ce /kernel/sched/clock.c
parent    555570d744f8150d3fce6083f144026cd1e63627 (diff)
sched/clock: Delay switching sched_clock to stable
Currently we switch to the stable sched_clock if we guess the TSC is
usable, and then switch back to the unstable path if it turns out TSC
isn't stable during SMP bringup after all.

Delay switching to the stable path until after SMP bringup is complete.
This way we'll avoid switching during the time we detect the worst of
the TSC offences.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
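To make the intended ordering concrete, below is a minimal userspace sketch of the state machine this patch introduces (sched_clock_running: 0 = off, 1 = early init, 2 = late init). The function names mirror the kernel's, but the bodies are simplified stand-ins: a plain int instead of the static key, fprintf() instead of WARN_ON_ONCE(), and no memory barriers or workqueues. It illustrates the call order the patch enforces, not the kernel implementation.

/* sketch.c -- illustrative only; build with: cc -o sketch sketch.c */
#include <stdio.h>

static int sched_clock_running;         /* 0 = off, 1 = early init, 2 = late init */
static int __sched_clock_stable_early;  /* the early boot-time guess */
static int sched_clock_stable;          /* stand-in for the static key */

void sched_clock_init(void)             /* early boot, before SMP bringup */
{
        sched_clock_running = 1;
}

void set_sched_clock_stable(void)       /* e.g. when the TSC looks usable */
{
        __sched_clock_stable_early = 1;
        /* Should only happen early; if it happens late, warn but still apply. */
        if (sched_clock_running == 2) {
                fprintf(stderr, "WARN: set_sched_clock_stable() after late init\n");
                sched_clock_stable = 1;
        }
}

void clear_sched_clock_stable(void)     /* e.g. from the TSC watchdog */
{
        __sched_clock_stable_early = 0;
        if (sched_clock_running == 2)
                sched_clock_stable = 0; /* the kernel defers this to a workqueue */
}

void sched_clock_init_late(void)        /* after SMP bringup is complete */
{
        sched_clock_running = 2;
        /* Commit whatever the early guess settled on. */
        if (__sched_clock_stable_early)
                sched_clock_stable = 1;
}

int main(void)
{
        sched_clock_init();
        set_sched_clock_stable();       /* guess: TSC usable */
        clear_sched_clock_stable();     /* SMP bringup proves otherwise */
        sched_clock_init_late();        /* stays on the unstable path */
        printf("stable = %d\n", sched_clock_stable);
        return 0;
}

Running this prints stable = 0: because the early guess is only committed in sched_clock_init_late(), a TSC that misbehaves during SMP bringup merely clears the guess and the stable path is never entered, instead of being entered and then torn down again.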
Diffstat (limited to 'kernel/sched/clock.c')
-rw-r--r--    kernel/sched/clock.c    50
1 file changed, 22 insertions(+), 28 deletions(-)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 5d6dd38b449c..b3466d4e0cc2 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -77,6 +77,11 @@ EXPORT_SYMBOL_GPL(sched_clock);
 
 __read_mostly int sched_clock_running;
 
+void sched_clock_init(void)
+{
+        sched_clock_running = 1;
+}
+
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
 static int __sched_clock_stable_early;
@@ -96,12 +101,18 @@ void set_sched_clock_stable(void)
 {
         __sched_clock_stable_early = 1;
 
-        smp_mb(); /* matches sched_clock_init() */
-
-        if (!sched_clock_running)
-                return;
+        smp_mb(); /* matches sched_clock_init_late() */
 
-        __set_sched_clock_stable();
+        /*
+         * This really should only be called early (before
+         * sched_clock_init_late()) when guestimating our sched_clock() is
+         * solid.
+         *
+         * After that we test stability and we can negate our guess using
+         * clear_sched_clock_stable, possibly from a watchdog.
+         */
+        if (WARN_ON_ONCE(sched_clock_running == 2))
+                __set_sched_clock_stable();
 }
 
 static void __clear_sched_clock_stable(struct work_struct *work)
@@ -117,12 +128,10 @@ void clear_sched_clock_stable(void)
 {
         __sched_clock_stable_early = 0;
 
-        smp_mb(); /* matches sched_clock_init() */
-
-        if (!sched_clock_running)
-                return;
+        smp_mb(); /* matches sched_clock_init_late() */
 
-        schedule_work(&sched_clock_work);
+        if (sched_clock_running == 2)
+                schedule_work(&sched_clock_work);
 }
 
 struct sched_clock_data {
@@ -143,20 +152,9 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
         return &per_cpu(sched_clock_data, cpu);
 }
 
-void sched_clock_init(void)
+void sched_clock_init_late(void)
 {
-        u64 ktime_now = ktime_to_ns(ktime_get());
-        int cpu;
-
-        for_each_possible_cpu(cpu) {
-                struct sched_clock_data *scd = cpu_sdc(cpu);
-
-                scd->tick_raw = 0;
-                scd->tick_gtod = ktime_now;
-                scd->clock = ktime_now;
-        }
-
-        sched_clock_running = 1;
+        sched_clock_running = 2;
 
         /*
          * Ensure that it is impossible to not do a static_key update.
@@ -362,11 +360,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
-void sched_clock_init(void)
-{
-        sched_clock_running = 1;
-}
-
 u64 sched_clock_cpu(int cpu)
 {
         if (unlikely(!sched_clock_running))
@@ -374,6 +367,7 @@ u64 sched_clock_cpu(int cpu)
 
         return sched_clock();
 }
+
 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*