Diffstat (limited to 'include/linux/hrtimer.h')
-rw-r--r--   include/linux/hrtimer.h | 448
1 file changed, 190 insertions, 258 deletions
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index d19a5c2d2270..2cf1bf65b225 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * include/linux/hrtimer.h
- *
  * hrtimers - High-resolution kernel timers
  *
  * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
@@ -9,41 +8,51 @@
  * data type definitions, declarations, prototypes
  *
  * Started by: Thomas Gleixner and Ingo Molnar
- *
- * For licencing details see kernel-base/COPYING
  */
 #ifndef _LINUX_HRTIMER_H
 #define _LINUX_HRTIMER_H
 
-#include <linux/rbtree.h>
-#include <linux/ktime.h>
+#include <linux/hrtimer_defs.h>
+#include <linux/hrtimer_types.h>
 #include <linux/init.h>
 #include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/percpu.h>
+#include <linux/percpu-defs.h>
+#include <linux/rbtree.h>
 #include <linux/timer.h>
-#include <linux/timerqueue.h>
-
-struct hrtimer_clock_base;
-struct hrtimer_cpu_base;
 
 /*
  * Mode arguments of xxx_hrtimer functions:
+ *
+ * HRTIMER_MODE_ABS - Time value is absolute
+ * HRTIMER_MODE_REL - Time value is relative to now
+ * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
+ *                       when starting the timer)
+ * HRTIMER_MODE_SOFT - Timer callback function will be executed in
+ *                     soft irq context
+ * HRTIMER_MODE_HARD - Timer callback function will be executed in
+ *                     hard irq context even on PREEMPT_RT.
  */
 enum hrtimer_mode {
-        HRTIMER_MODE_ABS = 0x0,         /* Time value is absolute */
-        HRTIMER_MODE_REL = 0x1,         /* Time value is relative to now */
-        HRTIMER_MODE_PINNED = 0x02,     /* Timer is bound to CPU */
-        HRTIMER_MODE_ABS_PINNED = 0x02,
-        HRTIMER_MODE_REL_PINNED = 0x03,
-};
+        HRTIMER_MODE_ABS = 0x00,
+        HRTIMER_MODE_REL = 0x01,
+        HRTIMER_MODE_PINNED = 0x02,
+        HRTIMER_MODE_SOFT = 0x04,
+        HRTIMER_MODE_HARD = 0x08,
 
-/*
- * Return values for the callback function
- */
-enum hrtimer_restart {
-        HRTIMER_NORESTART,      /* Timer is not restarted */
-        HRTIMER_RESTART,        /* Timer must be restarted */
+        HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
+        HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
+
+        HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
+        HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
+
+        HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
+        HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
+
+        HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
+        HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD,
+
+        HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
+        HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
 };
 
 /*
@@ -53,70 +62,25 @@ enum hrtimer_restart {
  *
  * 0x00 inactive
  * 0x01 enqueued into rbtree
- * 0x02 callback function running
- * 0x04 timer is migrated to another cpu
  *
- * Special cases:
- * 0x03 callback function running and enqueued
- *      (was requeued on another CPU)
- * 0x05 timer was migrated on CPU hotunplug
+ * The callback state is not part of the timer->state because clearing it would
+ * mean touching the timer after the callback, this makes it impossible to free
+ * the timer from the callback function.
+ *
+ * Therefore we track the callback state in:
  *
- * The "callback function running and enqueued" status is only possible on
- * SMP. It happens for example when a posix timer expired and the callback
+ *      timer->base->cpu_base->running == timer
+ *
+ * On SMP it is possible to have a "callback function running and enqueued"
+ * status. It happens for example when a posix timer expired and the callback
  * queued a signal. Between dropping the lock which protects the posix timer
  * and reacquiring the base lock of the hrtimer, another CPU can deliver the
- * signal and rearm the timer. We have to preserve the callback running state,
- * as otherwise the timer could be removed before the softirq code finishes the
- * the handling of the timer.
- *
- * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
- * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
- * also affects HRTIMER_STATE_MIGRATE where the preservation is not
- * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
- * enqueued on the new cpu.
+ * signal and rearm the timer.
  *
  * All state transitions are protected by cpu_base->lock.
  */
 #define HRTIMER_STATE_INACTIVE 0x00
 #define HRTIMER_STATE_ENQUEUED 0x01
-#define HRTIMER_STATE_CALLBACK 0x02
-#define HRTIMER_STATE_MIGRATE 0x04
-
-/**
- * struct hrtimer - the basic hrtimer structure
- * @node: timerqueue node, which also manages node.expires,
- *        the absolute expiry time in the hrtimers internal
- *        representation. The time is related to the clock on
- *        which the timer is based. Is setup by adding
- *        slack to the _softexpires value. For non range timers
- *        identical to _softexpires.
- * @_softexpires: the absolute earliest expiry time of the hrtimer.
- *        The time which was given as expiry time when the timer
- *        was armed.
- * @function: timer expiry callback function
- * @base: pointer to the timer base (per cpu and per clock)
- * @state: state information (See bit values above)
- * @start_site: timer statistics field to store the site where the timer
- *        was started
- * @start_comm: timer statistics field to store the name of the process which
- *        started the timer
- * @start_pid: timer statistics field to store the pid of the task which
- *        started the timer
- *
- * The hrtimer structure must be initialized by hrtimer_init()
- */
-struct hrtimer {
-        struct timerqueue_node node;
-        ktime_t _softexpires;
-        enum hrtimer_restart (*function)(struct hrtimer *);
-        struct hrtimer_clock_base *base;
-        unsigned long state;
-#ifdef CONFIG_TIMER_STATS
-        int start_pid;
-        void *start_site;
-        char start_comm[16];
-#endif
-};
 
 /**
  * struct hrtimer_sleeper - simple sleeper structure
@@ -130,69 +94,6 @@ struct hrtimer_sleeper {
         struct task_struct *task;
 };
 
-/**
- * struct hrtimer_clock_base - the timer base for a specific clock
- * @cpu_base: per cpu clock base
- * @index: clock type index for per_cpu support when moving a
- *         timer to a base on another cpu.
- * @clockid: clock id for per_cpu support
- * @active: red black tree root node for the active timers
- * @resolution: the resolution of the clock, in nanoseconds
- * @get_time: function to retrieve the current time of the clock
- * @softirq_time: the time when running the hrtimer queue in the softirq
- * @offset: offset of this clock to the monotonic base
- */
-struct hrtimer_clock_base {
-        struct hrtimer_cpu_base *cpu_base;
-        int index;
-        clockid_t clockid;
-        struct timerqueue_head active;
-        ktime_t resolution;
-        ktime_t (*get_time)(void);
-        ktime_t softirq_time;
-        ktime_t offset;
-};
-
-enum hrtimer_base_type {
-        HRTIMER_BASE_MONOTONIC,
-        HRTIMER_BASE_REALTIME,
-        HRTIMER_BASE_BOOTTIME,
-        HRTIMER_BASE_TAI,
-        HRTIMER_MAX_CLOCK_BASES,
-};
-
-/*
- * struct hrtimer_cpu_base - the per cpu clock bases
- * @lock: lock protecting the base and associated clock bases
- *        and timers
- * @active_bases: Bitfield to mark bases with active timers
- * @clock_was_set: Indicates that clock was set from irq context.
- * @expires_next: absolute time of the next event which was scheduled
- *        via clock_set_next_event()
- * @hres_active: State of high resolution mode
- * @hang_detected: The last hrtimer interrupt detected a hang
- * @nr_events: Total number of hrtimer interrupt events
- * @nr_retries: Total number of hrtimer interrupt retries
- * @nr_hangs: Total number of hrtimer interrupt hangs
- * @max_hang_time: Maximum time spent in hrtimer_interrupt
- * @clock_base: array of clock bases for this cpu
- */
-struct hrtimer_cpu_base {
-        raw_spinlock_t lock;
-        unsigned int active_bases;
-        unsigned int clock_was_set;
-#ifdef CONFIG_HIGH_RES_TIMERS
-        ktime_t expires_next;
-        int hres_active;
-        int hang_detected;
-        unsigned long nr_events;
-        unsigned long nr_retries;
-        unsigned long nr_hangs;
-        ktime_t max_hang_time;
-#endif
-        struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
-};
-
 static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
 {
         timer->node.expires = time;
@@ -205,7 +106,7 @@ static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time
         timer->node.expires = ktime_add_safe(time, delta);
 }
 
-static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
 {
         timer->_softexpires = time;
         timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
@@ -213,8 +114,8 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t
 
 static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
 {
-        timer->node.expires.tv64 = tv64;
-        timer->_softexpires.tv64 = tv64;
+        timer->node.expires = tv64;
+        timer->_softexpires = tv64;
 }
 
 static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
@@ -241,11 +142,11 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
 
 static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
 {
-        return timer->node.expires.tv64;
+        return timer->node.expires;
 }
 
 static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
 {
-        return timer->_softexpires.tv64;
+        return timer->_softexpires;
 }
 
 static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
 {
@@ -253,160 +154,162 @@ static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
         return ktime_to_ns(timer->node.expires);
 }
 
-static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
-{
-        return ktime_sub(timer->node.expires, timer->base->get_time());
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
-struct clock_event_device;
-
-extern void hrtimer_interrupt(struct clock_event_device *dev);
+ktime_t hrtimer_cb_get_time(const struct hrtimer *timer);
 
-/*
- * In high resolution mode the time reference must be read accurate
- */
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
 {
-        return timer->base->get_time();
+        return ktime_sub(timer->node.expires, hrtimer_cb_get_time(timer));
 }
 
 static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
-        return timer->base->cpu_base->hres_active;
+        return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+                timer->base->cpu_base->hres_active : 0;
 }
 
-extern void hrtimer_peek_ahead_timers(void);
+#ifdef CONFIG_HIGH_RES_TIMERS
+struct clock_event_device;
 
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-# define HIGH_RES_NSEC 1
-# define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC }
-# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
+extern void hrtimer_interrupt(struct clock_event_device *dev);
 
-extern void clock_was_set_delayed(void);
+extern unsigned int hrtimer_resolution;
 
 #else
 
-# define MONOTONIC_RES_NSEC LOW_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_LOW_RES
+#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
 
-static inline void hrtimer_peek_ahead_timers(void) { }
+#endif
 
-/*
- * In non high resolution mode the time reference is taken from
- * the base softirq time variable.
- */
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+static inline ktime_t
+__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
 {
-        return timer->base->softirq_time;
+        ktime_t rem = ktime_sub(timer->node.expires, now);
+
+        /*
+         * Adjust relative timers for the extra we added in
+         * hrtimer_start_range_ns() to prevent short timeouts.
+         */
+        if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
+                rem -= hrtimer_resolution;
+        return rem;
 }
 
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+static inline ktime_t
+hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
 {
-        return 0;
+        return __hrtimer_expires_remaining_adjusted(timer, hrtimer_cb_get_time(timer));
 }
 
-static inline void clock_was_set_delayed(void) { }
-
-#endif
-
-extern void clock_was_set(void);
 
 #ifdef CONFIG_TIMERFD
 extern void timerfd_clock_was_set(void);
+extern void timerfd_resume(void);
 #else
 static inline void timerfd_clock_was_set(void) { }
+static inline void timerfd_resume(void) { }
 #endif
-extern void hrtimers_resume(void);
-
-extern ktime_t ktime_get(void);
-extern ktime_t ktime_get_real(void);
-extern ktime_t ktime_get_boottime(void);
-extern ktime_t ktime_get_monotonic_offset(void);
-extern ktime_t ktime_get_clocktai(void);
-extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
-                                        ktime_t *offs_tai);
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
+#ifdef CONFIG_PREEMPT_RT
+void hrtimer_cancel_wait_running(const struct hrtimer *timer);
+#else
+static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
+{
+        cpu_relax();
+}
+#endif
+
+static inline enum hrtimer_restart hrtimer_dummy_timeout(struct hrtimer *unused)
+{
+        return HRTIMER_NORESTART;
+}
 
 /* Exported timer functions: */
 
 /* Initialize timers: */
-extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
-                         enum hrtimer_mode mode);
+extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
+                          clockid_t clock_id, enum hrtimer_mode mode);
+extern void hrtimer_setup_on_stack(struct hrtimer *timer,
+                                   enum hrtimer_restart (*function)(struct hrtimer *),
+                                   clockid_t clock_id, enum hrtimer_mode mode);
+extern void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, clockid_t clock_id,
+                                           enum hrtimer_mode mode);
 
 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
-                                  enum hrtimer_mode mode);
-
 extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
 #else
-static inline void hrtimer_init_on_stack(struct hrtimer *timer,
-                                         clockid_t which_clock,
-                                         enum hrtimer_mode mode)
-{
-        hrtimer_init(timer, which_clock, mode);
-}
 static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
 #endif
 
 /* Basic timer operations: */
-extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
-                         const enum hrtimer_mode mode);
-extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-                        unsigned long range_ns, const enum hrtimer_mode mode);
-extern int
-__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-                         unsigned long delta_ns,
-                         const enum hrtimer_mode mode, int wakeup);
+extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+                                   u64 range_ns, const enum hrtimer_mode mode);
+
+/**
+ * hrtimer_start - (re)start an hrtimer
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+ *        relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ *        softirq based mode is considered for debug purpose only!
+ */
+static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
+                                 const enum hrtimer_mode mode)
+{
+        hrtimer_start_range_ns(timer, tim, 0, mode);
+}
 
 extern int hrtimer_cancel(struct hrtimer *timer);
 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
 
-static inline int hrtimer_start_expires(struct hrtimer *timer,
-                                        enum hrtimer_mode mode)
+static inline void hrtimer_start_expires(struct hrtimer *timer,
+                                         enum hrtimer_mode mode)
 {
-        unsigned long delta;
+        u64 delta;
         ktime_t soft, hard;
         soft = hrtimer_get_softexpires(timer);
         hard = hrtimer_get_expires(timer);
         delta = ktime_to_ns(ktime_sub(hard, soft));
-        return hrtimer_start_range_ns(timer, soft, delta, mode);
+        hrtimer_start_range_ns(timer, soft, delta, mode);
 }
 
-static inline int hrtimer_restart(struct hrtimer *timer)
+void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
+                                   enum hrtimer_mode mode);
+
+static inline void hrtimer_restart(struct hrtimer *timer)
 {
-        return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+        hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
 /* Query timers: */
-extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
-extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
-
-extern ktime_t hrtimer_get_next_event(void);
+extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
 
-/*
- * A timer is active, when it is enqueued into the rbtree or the
- * callback function is running or it's in the state of being migrated
- * to another cpu.
+/**
+ * hrtimer_get_remaining - get remaining time for the timer
+ * @timer: the timer to read
  */
-static inline int hrtimer_active(const struct hrtimer *timer)
+static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 {
-        return timer->state != HRTIMER_STATE_INACTIVE;
+        return __hrtimer_get_remaining(timer, false);
 }
 
-/*
- * Helper function to check, whether the timer is on one of the queues
+extern u64 hrtimer_get_next_event(void);
+extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
+
+extern bool hrtimer_active(const struct hrtimer *timer);
+
+/**
+ * hrtimer_is_queued - check, whether the timer is on one of the queues
+ * @timer: Timer to check
+ *
+ * Returns: True if the timer is queued, false otherwise
+ *
+ * The function can be used lockless, but it gives only a current snapshot.
  */
-static inline int hrtimer_is_queued(struct hrtimer *timer)
+static inline bool hrtimer_is_queued(struct hrtimer *timer)
 {
-        return timer->state & HRTIMER_STATE_ENQUEUED;
+        /* The READ_ONCE pairs with the update functions of timer->state */
+        return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
 }
 
 /*
@@ -415,50 +318,79 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
  */
 static inline int hrtimer_callback_running(struct hrtimer *timer)
 {
-        return timer->state & HRTIMER_STATE_CALLBACK;
+        return timer->base->running == timer;
+}
+
+/**
+ * hrtimer_update_function - Update the timer's callback function
+ * @timer: Timer to update
+ * @function: New callback function
+ *
+ * Only safe to call if the timer is not enqueued. Can be called in the callback function if the
+ * timer is not enqueued at the same time (see the comments above HRTIMER_STATE_ENQUEUED).
+ */
+static inline void hrtimer_update_function(struct hrtimer *timer,
+                                           enum hrtimer_restart (*function)(struct hrtimer *))
+{
+#ifdef CONFIG_PROVE_LOCKING
+        guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock);
+
+        if (WARN_ON_ONCE(hrtimer_is_queued(timer)))
+                return;
+
+        if (WARN_ON_ONCE(!function))
+                return;
+#endif
+        ACCESS_PRIVATE(timer, function) = function;
 }
 
 /* Forward a hrtimer so it expires after now: */
 extern u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
 
-/* Forward a hrtimer so it expires after the hrtimer's current now */
+/**
+ * hrtimer_forward_now() - forward the timer expiry so it expires after now
+ * @timer: hrtimer to forward
+ * @interval: the interval to forward
+ *
+ * It is a variant of hrtimer_forward(). The timer will expire after the current
+ * time of the hrtimer clock base. See hrtimer_forward() for details.
+ */
 static inline u64 hrtimer_forward_now(struct hrtimer *timer, ktime_t interval)
 {
-        return hrtimer_forward(timer, timer->base->get_time(), interval);
+        return hrtimer_forward(timer, hrtimer_cb_get_time(timer), interval);
 }
 
 /* Precise sleep: */
-extern long hrtimer_nanosleep(struct timespec *rqtp,
-                              struct timespec __user *rmtp,
-                              const enum hrtimer_mode mode,
-                              const clockid_t clockid);
-extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
-extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
-                                 struct task_struct *tsk);
+extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
+extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+                              const clockid_t clockid);
 
-extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
-                                    const enum hrtimer_mode mode);
+extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
+                                    const enum hrtimer_mode mode);
 extern int schedule_hrtimeout_range_clock(ktime_t *expires,
-                                          unsigned long delta, const enum hrtimer_mode mode, int clock);
+                                          u64 delta,
+                                          const enum hrtimer_mode mode,
+                                          clockid_t clock_id);
 extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
 
 /* Soft interrupt function to run the hrtimer queues: */
 extern void hrtimer_run_queues(void);
-extern void hrtimer_run_pending(void);
 
 /* Bootup initialization: */
 extern void __init hrtimers_init(void);
 
-#if BITS_PER_LONG < 64
-extern u64 ktime_divns(const ktime_t kt, s64 div);
-#else /* BITS_PER_LONG < 64 */
-# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
-#endif
-
 /* Show pending timers: */
 extern void sysrq_timer_list_show(void);
 
+int hrtimers_prepare_cpu(unsigned int cpu);
+int hrtimers_cpu_starting(unsigned int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+int hrtimers_cpu_dying(unsigned int cpu);
+#else
+#define hrtimers_cpu_dying NULL
+#endif
+
 #endif
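The hunks above remove hrtimer_init()/hrtimer_init_on_stack() in favour of hrtimer_setup()/hrtimer_setup_on_stack(), which take the callback as an argument, and turn hrtimer_start() into an inline wrapper around hrtimer_start_range_ns(). A minimal one-shot usage sketch against the post-patch API, not part of the commit; the my_device/my_timeout names and the 100 ms value are made up for illustration:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Hypothetical per-device structure embedding the timer. */
struct my_device {
        struct hrtimer timeout_timer;
};

static enum hrtimer_restart my_timeout(struct hrtimer *t)
{
        struct my_device *dev = container_of(t, struct my_device, timeout_timer);

        /* ... handle the timeout for 'dev' ... */
        (void)dev;

        return HRTIMER_NORESTART;       /* one-shot: do not re-arm */
}

static void my_device_arm_timeout(struct my_device *dev)
{
        /* Clock, callback and mode are bound in one call; this replaces the
         * old hrtimer_init() plus a separate timer->function assignment. */
        hrtimer_setup(&dev->timeout_timer, my_timeout, CLOCK_MONOTONIC,
                      HRTIMER_MODE_REL);

        /* Expire 100 ms from now, relative to the current time. */
        hrtimer_start(&dev->timeout_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}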
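For a periodic timer, the callback re-arms itself with hrtimer_forward_now(), whose kernel-doc is added above, and returns HRTIMER_RESTART. Illustrative sketch only, assuming a 10 ms period and a timer armed elsewhere with HRTIMER_MODE_REL:

#include <linux/printk.h>

static enum hrtimer_restart my_tick(struct hrtimer *t)
{
        /*
         * Push the expiry forward relative to the clock base's current time;
         * the return value counts how many intervals were skipped.
         */
        u64 overruns = hrtimer_forward_now(t, ms_to_ktime(10));

        if (overruns > 1)
                pr_debug("hrtimer example: missed %llu period(s)\n",
                         (unsigned long long)(overruns - 1));

        return HRTIMER_RESTART;         /* keep the timer queued */
}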
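hrtimer_set_expires_range_ns() and hrtimer_start_expires() now carry the slack as a u64 nanosecond value. A hedged sketch of arming a timer with a soft/hard expiry window; the helper name and the 500 us slack are illustrative, not from the patch:

#include <linux/time64.h>       /* NSEC_PER_USEC */

/*
 * Arm 'timer' with an absolute soft expiry and 500 us of slack: the core may
 * fire it anywhere in [soft_expiry, soft_expiry + 500 us], which lets wakeups
 * be coalesced.
 */
static void my_arm_with_slack(struct hrtimer *timer, ktime_t soft_expiry)
{
        hrtimer_set_expires_range_ns(timer, soft_expiry, 500 * NSEC_PER_USEC);
        hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}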
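schedule_hrtimeout_range() likewise switches its delta argument from unsigned long to u64. A sketch of the usual calling pattern, under the assumption (from the function's own documentation, not this diff) that the caller sets the task state before sleeping; my_precise_sleep is a hypothetical helper:

#include <linux/sched.h>

/* Sleep until 'expires' (absolute CLOCK_MONOTONIC time), tolerating up to
 * 'slack_ns' of extra delay so the wakeup can be batched with others. */
static int my_precise_sleep(ktime_t expires, u64 slack_ns)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_hrtimeout_range(&expires, slack_ns, HRTIMER_MODE_ABS);
}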
