author     Anna-Maria Gleixner <anna-maria@linutronix.de>  2017-12-21 11:41:54 +0100
committer  Ingo Molnar <mingo@kernel.org>  2018-01-16 03:00:47 +0100
commit     dd934aa8ad1fbaab3d916125c7fe42fff75aa7ff (patch)
tree       7779b34d8e56e3a28b9147121fc34b518a38e4ae /kernel/time
parent     ad38f596d8e4babc19be8b21a7a49debffb4a7f5 (diff)
hrtimer: Use irqsave/irqrestore around __run_hrtimer()
__run_hrtimer() is called with the hrtimer_cpu_base.lock held and interrupts disabled. Before invoking the timer callback the base lock is dropped, but interrupts stay disabled.

The upcoming support for softirq based hrtimers requires that interrupts are enabled before the timer callback is invoked.

To avoid code duplication, take hrtimer_cpu_base.lock with raw_spin_lock_irqsave(flags) at the call site and hand the flags in as a parameter. The raw_spin_unlock_irqrestore() before the callback invocation then either keeps interrupts disabled when running in hard interrupt context, or restores the interrupts-enabled state when called from softirq context.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: keescook@chromium.org
Link: http://lkml.kernel.org/r/20171221104205.7269-26-anna-maria@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
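The save/restore discipline is easiest to see in isolation. Below is a minimal, self-contained userspace sketch of the pattern the patch introduces; it is illustrative only. The names (irqs_enabled, my_lock_irqsave(), run_one(), cb()) are hypothetical stand-ins, not kernel identifiers, and a plain bool models the CPU interrupt state that the real raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair saves and restores.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical model: 'irqs_enabled' stands in for the CPU interrupt
 * state; the lock itself is elided since only the IRQ-state handling
 * matters here. */
static bool irqs_enabled = true;

static unsigned long my_lock_irqsave(void)
{
	unsigned long flags = irqs_enabled;	/* save caller's IRQ state */
	irqs_enabled = false;			/* disable interrupts, take lock */
	return flags;
}

static void my_unlock_irqrestore(unsigned long flags)
{
	irqs_enabled = flags;			/* drop lock, restore saved state */
}

static void my_lock_irq(void)
{
	irqs_enabled = false;			/* disable unconditionally, take lock */
}

/* Mirrors the reworked __run_hrtimer(): restore the caller's IRQ state
 * before the callback, re-acquire with IRQs hard-disabled afterwards. */
static void run_one(unsigned long flags, void (*fn)(void))
{
	my_unlock_irqrestore(flags);	/* IRQs on iff the caller had them on */
	fn();				/* callback runs in the caller's IRQ state */
	my_lock_irq();			/* back under the lock, IRQs off again */
}

static void cb(void)
{
	printf("callback sees interrupts %s\n",
	       irqs_enabled ? "enabled" : "disabled");
}

int main(void)
{
	unsigned long flags;

	/* Hard-interrupt path (hrtimer_interrupt): IRQs are already off on
	 * entry, so irqsave records "off" and the callback still runs with
	 * interrupts disabled. */
	irqs_enabled = false;
	flags = my_lock_irqsave();
	run_one(flags, cb);
	my_unlock_irqrestore(flags);

	/* Softirq path (the upcoming soft hrtimers): IRQs are on at entry,
	 * so irqsave records "on" and the callback runs with interrupts
	 * enabled. */
	irqs_enabled = true;
	flags = my_lock_irqsave();
	run_one(flags, cb);
	my_unlock_irqrestore(flags);

	return 0;
}

A single run_one() body thus serves both calling contexts, which is exactly the code duplication the commit message sets out to avoid.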
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/hrtimer.c | 31
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 5d9b81d224b3..31ccd86e63c0 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1159,7 +1159,8 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
struct hrtimer_clock_base *base,
- struct hrtimer *timer, ktime_t *now)
+ struct hrtimer *timer, ktime_t *now,
+ unsigned long flags)
{
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
@@ -1194,11 +1195,11 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
* protected against migration to a different CPU even if the lock
* is dropped.
*/
- raw_spin_unlock(&cpu_base->lock);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
trace_hrtimer_expire_exit(timer);
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irq(&cpu_base->lock);
/*
* Note: We clear the running state after enqueue_hrtimer and
@@ -1226,7 +1227,8 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
base->running = NULL;
}
-static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
+ unsigned long flags)
{
struct hrtimer_clock_base *base;
unsigned int active = cpu_base->active_bases;
@@ -1257,7 +1259,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
if (basenow < hrtimer_get_softexpires_tv64(timer))
break;
- __run_hrtimer(cpu_base, base, timer, &basenow);
+ __run_hrtimer(cpu_base, base, timer, &basenow, flags);
}
}
}
@@ -1272,13 +1274,14 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
+ unsigned long flags;
int retries = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
dev->next_event = KTIME_MAX;
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
entry_time = now = hrtimer_update_base(cpu_base);
retry:
cpu_base->in_hrtirq = 1;
@@ -1291,7 +1294,7 @@ retry:
*/
cpu_base->expires_next = KTIME_MAX;
- __hrtimer_run_queues(cpu_base, now);
+ __hrtimer_run_queues(cpu_base, now, flags);
/* Reevaluate the clock bases for the next expiry */
expires_next = __hrtimer_get_next_event(cpu_base);
@@ -1301,7 +1304,7 @@ retry:
*/
cpu_base->expires_next = expires_next;
cpu_base->in_hrtirq = 0;
- raw_spin_unlock(&cpu_base->lock);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
/* Reprogramming necessary ? */
if (!tick_program_event(expires_next, 0)) {
@@ -1322,7 +1325,7 @@ retry:
* Acquire base lock for updating the offsets and retrieving
* the current time.
*/
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
cpu_base->nr_retries++;
if (++retries < 3)
@@ -1335,7 +1338,8 @@ retry:
*/
cpu_base->nr_hangs++;
cpu_base->hang_detected = 1;
- raw_spin_unlock(&cpu_base->lock);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
delta = ktime_sub(now, entry_time);
if ((unsigned int)delta > cpu_base->max_hang_time)
cpu_base->max_hang_time = (unsigned int) delta;
@@ -1377,6 +1381,7 @@ static inline void __hrtimer_peek_ahead_timers(void) { }
void hrtimer_run_queues(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ unsigned long flags;
ktime_t now;
if (__hrtimer_hres_active(cpu_base))
@@ -1394,10 +1399,10 @@ void hrtimer_run_queues(void)
return;
}
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
- __hrtimer_run_queues(cpu_base, now);
- raw_spin_unlock(&cpu_base->lock);
+ __hrtimer_run_queues(cpu_base, now, flags);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
/*