Diffstat (limited to 'kernel/time/sched_clock.c')
-rw-r--r--	kernel/time/sched_clock.c	361
1 file changed, 241 insertions(+), 120 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a326f27d7f09..f39111830ca3 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -1,212 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * sched_clock.c: support for extending counters to full 64-bit ns counter
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Generic sched_clock() support, to extend low level hardware time
+ * counters to full 64-bit ns values.
  */
 #include <linux/clocksource.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
+#include <linux/ktime.h>
 #include <linux/kernel.h>
+#include <linux/math.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
+#include <linux/sched/clock.h>
 #include <linux/syscore_ops.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
 #include <linux/sched_clock.h>
+#include <linux/seqlock.h>
+#include <linux/bitops.h>
+
+#include "timekeeping.h"
 
+/**
+ * struct clock_data - all data needed for sched_clock() (including
+ *                     registration of a new clock source)
+ *
+ * @seq:		Sequence counter for protecting updates. The lowest
+ *			bit is the index for @read_data.
+ * @read_data:		Data required to read from sched_clock.
+ * @wrap_kt:		Duration for which clock can run before wrapping.
+ * @rate:		Tick rate of the registered clock.
+ * @actual_read_sched_clock: Registered hardware level clock read function.
+ *
+ * The ordering of this structure has been chosen to optimize cache
+ * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
+ * into a single 64-byte cache line.
+ */
 struct clock_data {
-	u64 epoch_ns;
-	u32 epoch_cyc;
-	u32 epoch_cyc_copy;
-	unsigned long rate;
-	u32 mult;
-	u32 shift;
-	bool suspended;
+	seqcount_latch_t	seq;
+	struct clock_read_data	read_data[2];
+	ktime_t			wrap_kt;
+	unsigned long		rate;
+
+	u64 (*actual_read_sched_clock)(void);
 };
 
-static void sched_clock_poll(unsigned long wrap_ticks);
-static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
+static struct hrtimer sched_clock_timer;
 static int irqtime = -1;
 
 core_param(irqtime, irqtime, int, 0400);
 
-static struct clock_data cd = {
-	.mult	= NSEC_PER_SEC / HZ,
-};
+static u64 notrace jiffy_sched_clock_read(void)
+{
+	/*
+	 * We don't need to use get_jiffies_64 on 32-bit arches here
+	 * because we register with BITS_PER_LONG
+	 */
+	return (u64)(jiffies - INITIAL_JIFFIES);
+}
 
-static u32 __read_mostly sched_clock_mask = 0xffffffff;
+static struct clock_data cd ____cacheline_aligned = {
+	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
+			  .read_sched_clock = jiffy_sched_clock_read, },
+	.actual_read_sched_clock = jiffy_sched_clock_read,
+};
 
-static u32 notrace jiffy_sched_clock_read(void)
+static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
-	return (u32)(jiffies - INITIAL_JIFFIES);
+	return (cyc * mult) >> shift;
 }
 
-static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
+{
+	*seq = read_seqcount_latch(&cd.seq);
+	return cd.read_data + (*seq & 1);
+}
 
-static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+notrace int sched_clock_read_retry(unsigned int seq)
 {
-	return (cyc * mult) >> shift;
+	return read_seqcount_latch_retry(&cd.seq, seq);
 }
 
-static unsigned long long notrace sched_clock_32(void)
+static __always_inline unsigned long long __sched_clock(void)
 {
-	u64 epoch_ns;
-	u32 epoch_cyc;
-	u32 cyc;
+	struct clock_read_data *rd;
+	unsigned int seq;
+	u64 cyc, res;
 
-	if (cd.suspended)
-		return cd.epoch_ns;
+	do {
+		seq = raw_read_seqcount_latch(&cd.seq);
+		rd = cd.read_data + (seq & 1);
+
+		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
+		      rd->sched_clock_mask;
+		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
+	} while (raw_read_seqcount_latch_retry(&cd.seq, seq));
+
+	return res;
+}
+
+unsigned long long noinstr sched_clock_noinstr(void)
+{
+	return __sched_clock();
+}
 
+unsigned long long notrace sched_clock(void)
+{
+	unsigned long long ns;
+
+	preempt_disable_notrace();
 	/*
-	 * Load the epoch_cyc and epoch_ns atomically. We do this by
-	 * ensuring that we always write epoch_cyc, epoch_ns and
-	 * epoch_cyc_copy in strict order, and read them in strict order.
-	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
-	 * the middle of an update, and we should repeat the load.
+	 * All of __sched_clock() is a seqcount_latch reader critical section,
+	 * but relies on the raw helpers which are uninstrumented. For KCSAN,
+	 * mark all accesses in __sched_clock() as atomic.
 	 */
-	do {
-		epoch_cyc = cd.epoch_cyc;
-		smp_rmb();
-		epoch_ns = cd.epoch_ns;
-		smp_rmb();
-	} while (epoch_cyc != cd.epoch_cyc_copy);
+	kcsan_nestable_atomic_begin();
+	ns = __sched_clock();
+	kcsan_nestable_atomic_end();
+	preempt_enable_notrace();
+
+	return ns;
+}
+
+/*
+ * Updating the data required to read the clock.
+ *
+ * sched_clock() will never observe mis-matched data even if called from
+ * an NMI. We do this by maintaining an odd/even copy of the data and
+ * steering sched_clock() to one or the other using a sequence counter.
+ * In order to preserve the data cache profile of sched_clock() as much
+ * as possible the system reverts back to the even copy when the update
+ * completes; the odd copy is used *only* during an update.
+ */
+static void update_clock_read_data(struct clock_read_data *rd)
+{
+	/* steer readers towards the odd copy */
+	write_seqcount_latch_begin(&cd.seq);
+
+	/* now its safe for us to update the normal (even) copy */
+	cd.read_data[0] = *rd;
+
+	/* switch readers back to the even copy */
+	write_seqcount_latch(&cd.seq);
 
-	cyc = read_sched_clock();
-	cyc = (cyc - epoch_cyc) & sched_clock_mask;
-	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
+	/* update the backup (odd) copy with the new data */
+	cd.read_data[1] = *rd;
+
+	write_seqcount_latch_end(&cd.seq);
 }
 
 /*
- * Atomically update the sched_clock epoch.
+ * Atomically update the sched_clock() epoch.
  */
-static void notrace update_sched_clock(void)
+static void update_sched_clock(void)
 {
-	unsigned long flags;
-	u32 cyc;
+	u64 cyc;
 	u64 ns;
+	struct clock_read_data rd;
 
-	cyc = read_sched_clock();
-	ns = cd.epoch_ns +
-		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
-			  cd.mult, cd.shift);
-	/*
-	 * Write epoch_cyc and epoch_ns in a way that the update is
-	 * detectable in cyc_to_fixed_sched_clock().
-	 */
-	raw_local_irq_save(flags);
-	cd.epoch_cyc_copy = cyc;
-	smp_wmb();
-	cd.epoch_ns = ns;
-	smp_wmb();
-	cd.epoch_cyc = cyc;
-	raw_local_irq_restore(flags);
+	rd = cd.read_data[0];
+
+	cyc = cd.actual_read_sched_clock();
+	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
+
+	rd.epoch_ns = ns;
+	rd.epoch_cyc = cyc;
+
+	update_clock_read_data(&rd);
 }
 
-static void sched_clock_poll(unsigned long wrap_ticks)
+static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
 {
-	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
 	update_sched_clock();
+	hrtimer_forward_now(hrt, cd.wrap_kt);
+
+	return HRTIMER_RESTART;
 }
 
-void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
+void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
 {
-	unsigned long r, w;
-	u64 res, wrap;
+	u64 res, wrap, new_mask, new_epoch, cyc, ns;
+	u32 new_mult, new_shift;
+	unsigned long r, flags;
 	char r_unit;
+	struct clock_read_data rd;
 
 	if (cd.rate > rate)
 		return;
 
-	BUG_ON(bits > 32);
-	WARN_ON(!irqs_disabled());
-	read_sched_clock = read;
-	sched_clock_mask = (1 << bits) - 1;
+	/* Cannot register a sched_clock with interrupts on */
+	local_irq_save(flags);
+
+	/* Calculate the mult/shift to convert counter ticks to ns. */
+	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
+
+	new_mask = CLOCKSOURCE_MASK(bits);
 	cd.rate = rate;
 
-	/* calculate the mult/shift to convert counter ticks to ns. */
-	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
+	/* Calculate how many nanosecs until we risk wrapping */
+	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
+	cd.wrap_kt = ns_to_ktime(wrap);
+
+	rd = cd.read_data[0];
+
+	/* Update epoch for new counter and update 'epoch_ns' from old counter*/
+	new_epoch = read();
+	cyc = cd.actual_read_sched_clock();
+	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
+	cd.actual_read_sched_clock = read;
+
+	rd.read_sched_clock	= read;
+	rd.sched_clock_mask	= new_mask;
+	rd.mult			= new_mult;
+	rd.shift		= new_shift;
+	rd.epoch_cyc		= new_epoch;
+	rd.epoch_ns		= ns;
+
+	update_clock_read_data(&rd);
+
+	if (sched_clock_timer.function != NULL) {
+		/* update timeout for clock wrap */
+		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
+			      HRTIMER_MODE_REL_HARD);
+	}
 
 	r = rate;
 	if (r >= 4000000) {
-		r /= 1000000;
+		r = DIV_ROUND_CLOSEST(r, 1000000);
 		r_unit = 'M';
-	} else if (r >= 1000) {
-		r /= 1000;
+	} else if (r >= 4000) {
+		r = DIV_ROUND_CLOSEST(r, 1000);
 		r_unit = 'k';
-	} else
+	} else {
 		r_unit = ' ';
+	}
+
+	/* Calculate the ns resolution of this counter */
+	res = cyc_to_ns(1ULL, new_mult, new_shift);
+
+	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
+		bits, r, r_unit, res, wrap);
 
-	/* calculate how many ns until we wrap */
-	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
-	do_div(wrap, NSEC_PER_MSEC);
-	w = wrap;
+	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
+	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
+		enable_sched_clock_irqtime();
 
-	/* calculate the ns resolution of this counter */
-	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
-	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
-		bits, r, r_unit, res, w);
+	local_irq_restore(flags);
 
+	pr_debug("Registered %pS as sched_clock source\n", read);
+}
+EXPORT_SYMBOL_GPL(sched_clock_register);
+
+void __init generic_sched_clock_init(void)
+{
 	/*
-	 * Start the timer to keep sched_clock() properly updated and
-	 * sets the initial epoch.
+	 * If no sched_clock() function has been provided at that point,
+	 * make it the final one.
 	 */
-	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
+	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
+		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
+
 	update_sched_clock();
 
 	/*
-	 * Ensure that sched_clock() starts off at 0ns
+	 * Start the timer to keep sched_clock() properly updated and
+	 * sets the initial epoch.
 	 */
-	cd.epoch_ns = 0;
+	hrtimer_setup(&sched_clock_timer, sched_clock_poll, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
+}
 
-	/* Enable IRQ time accounting if we have a fast enough sched_clock */
-	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
-		enable_sched_clock_irqtime();
+/*
+ * Clock read function for use when the clock is suspended.
+ *
+ * This function makes it appear to sched_clock() as if the clock
+ * stopped counting at its last update.
+ *
+ * This function must only be called from the critical
+ * section in sched_clock(). It relies on the read_seqcount_retry()
+ * at the end of the critical section to be sure we observe the
+ * correct copy of 'epoch_cyc'.
+ */
+static u64 notrace suspended_sched_clock_read(void)
+{
+	unsigned int seq = read_seqcount_latch(&cd.seq);
 
-	pr_debug("Registered %pF as sched_clock source\n", read);
+	return cd.read_data[seq & 1].epoch_cyc;
 }
 
-unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
-
-unsigned long long notrace sched_clock(void)
+int sched_clock_suspend(void)
 {
-	return sched_clock_func();
+	struct clock_read_data *rd = &cd.read_data[0];
+
+	update_sched_clock();
+	hrtimer_cancel(&sched_clock_timer);
+	rd->read_sched_clock = suspended_sched_clock_read;
+
+	return 0;
 }
 
-void __init sched_clock_postinit(void)
+static int sched_clock_syscore_suspend(void *data)
 {
-	/*
-	 * If no sched_clock function has been provided at that point,
-	 * make it the final one one.
-	 */
-	if (read_sched_clock == jiffy_sched_clock_read)
-		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
-
-	sched_clock_poll(sched_clock_timer.data);
+	return sched_clock_suspend();
 }
 
-static int sched_clock_suspend(void)
+void sched_clock_resume(void)
 {
-	sched_clock_poll(sched_clock_timer.data);
-	cd.suspended = true;
-	return 0;
+	struct clock_read_data *rd = &cd.read_data[0];
+
+	rd->epoch_cyc = cd.actual_read_sched_clock();
+	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
+	rd->read_sched_clock = cd.actual_read_sched_clock;
 }
 
-static void sched_clock_resume(void)
+static void sched_clock_syscore_resume(void *data)
 {
-	cd.epoch_cyc = read_sched_clock();
-	cd.epoch_cyc_copy = cd.epoch_cyc;
-	cd.suspended = false;
+	sched_clock_resume();
 }
 
-static struct syscore_ops sched_clock_ops = {
-	.suspend = sched_clock_suspend,
-	.resume = sched_clock_resume,
+static const struct syscore_ops sched_clock_syscore_ops = {
+	.suspend = sched_clock_syscore_suspend,
+	.resume = sched_clock_syscore_resume,
+};
+
+static struct syscore sched_clock_syscore = {
+	.ops = &sched_clock_syscore_ops,
 };
 
 static int __init sched_clock_syscore_init(void)
 {
-	register_syscore_ops(&sched_clock_ops);
+	register_syscore(&sched_clock_syscore);
+
 	return 0;
 }
 device_initcall(sched_clock_syscore_init);
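
For context, a minimal sketch (not part of this commit) of how a clocksource driver might hand its free-running counter to the 64-bit sched_clock_register() API shown above. The device, MMIO base pointer, register offset and 24 MHz rate are assumptions for illustration only:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *example_timer_base;	/* hypothetical MMIO base, mapped by the driver */

/* Read a hypothetical 32-bit free-running up-counter at offset 0x04. */
static u64 notrace example_timer_sched_clock_read(void)
{
	return readl_relaxed(example_timer_base + 0x04);
}

static void __init example_timer_init(void)
{
	/* 32 valid counter bits, ticking at an assumed 24 MHz */
	sched_clock_register(example_timer_sched_clock_read, 32, 24000000);
}

sched_clock() then extends the narrow counter to a monotonic 64-bit nanosecond value, and the hrtimer registered in this file re-bases the epoch before the counter wraps.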
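
The exported sched_clock_read_begin()/sched_clock_read_retry() pair follows the same seqcount_latch protocol as __sched_clock() in the diff. A sketch of a hypothetical consumer (the helper name is made up) that samples the raw cycle delta in an NMI-safe way:

#include <linux/sched_clock.h>

/* Hypothetical helper: sample the masked cycle delta since the current epoch. */
static u64 example_snapshot_cycles(void)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 cyc;

	do {
		rd = sched_clock_read_begin(&seq);
		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
	} while (sched_clock_read_retry(seq));

	return cyc;
}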
