From 44511ab344c755d1f216bf421e92fbc2777e87fe Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Tue, 16 Feb 2021 16:50:20 +0100
Subject: time/debug: Remove dentry pointer for debugfs

There is no need to keep the dentry pointer around for the created
debugfs file, as it is only needed when removing it from the system.
When it is to be removed, ask debugfs itself for the pointer, to save
on storage and make things a bit simpler.

Signed-off-by: Greg Kroah-Hartman
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/r/20210216155020.1012407-1-gregkh@linuxfoundation.org
---
 kernel/time/test_udelay.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
(limited to 'kernel')

diff --git a/kernel/time/test_udelay.c b/kernel/time/test_udelay.c
index 77c63005dc4e..13b11eb62685 100644
--- a/kernel/time/test_udelay.c
+++ b/kernel/time/test_udelay.c
@@ -21,7 +21,6 @@
 #define DEBUGFS_FILENAME "udelay_test"

 static DEFINE_MUTEX(udelay_test_lock);
-static struct dentry *udelay_test_debugfs_file;
 static int udelay_test_usecs;
 static int udelay_test_iterations = DEFAULT_ITERATIONS;

@@ -138,8 +137,8 @@ static const struct file_operations udelay_test_debugfs_ops = {
 static int __init udelay_test_init(void)
 {
         mutex_lock(&udelay_test_lock);
-        udelay_test_debugfs_file = debugfs_create_file(DEBUGFS_FILENAME,
-                        S_IRUSR, NULL, NULL, &udelay_test_debugfs_ops);
+        debugfs_create_file(DEBUGFS_FILENAME, S_IRUSR, NULL, NULL,
+                            &udelay_test_debugfs_ops);
         mutex_unlock(&udelay_test_lock);

         return 0;
@@ -150,7 +149,7 @@ module_init(udelay_test_init);
 static void __exit udelay_test_exit(void)
 {
         mutex_lock(&udelay_test_lock);
-        debugfs_remove(udelay_test_debugfs_file);
+        debugfs_remove(debugfs_lookup(DEBUGFS_FILENAME, NULL));
         mutex_unlock(&udelay_test_lock);
 }
-- cgit
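[ Editor's note: the pattern above generalizes to any debugfs file whose
  dentry is only needed at removal time. A minimal sketch of a hypothetical
  module using it follows -- all names here are invented, only the debugfs
  calls are real. debugfs_remove() tolerates a NULL/error pointer, so a
  failed lookup is harmless; later kernels also added
  debugfs_lookup_and_remove() to drop the reference debugfs_lookup() takes. ]

#include <linux/debugfs.h>
#include <linux/module.h>

static const struct file_operations example_fops = {
        .owner = THIS_MODULE,
        /* .read/.write handlers elided for brevity */
};

static int __init example_init(void)
{
        /* No dentry stored; the return value can simply be ignored. */
        debugfs_create_file("example", 0400, NULL, NULL, &example_fops);
        return 0;
}

static void __exit example_exit(void)
{
        /* Ask debugfs for the dentry only when removing the file. */
        debugfs_remove(debugfs_lookup("example", NULL));
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");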
From 4bf07f6562a01a488877e05267808da7147f44a5 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 22 Mar 2021 22:39:03 +0100
Subject: timekeeping, clocksource: Fix various typos in comments

Fix ~56 single-word typos in timekeeping & clocksource code comments.

Signed-off-by: Ingo Molnar
Cc: Thomas Gleixner
Cc: John Stultz
Cc: Stephen Boyd
Cc: Daniel Lezcano
Cc: linux-kernel@vger.kernel.org
---
 kernel/time/alarmtimer.c             |  6 +++---
 kernel/time/clocksource.c            |  4 ++--
 kernel/time/hrtimer.c                | 18 +++++++++---------
 kernel/time/jiffies.c                |  2 +-
 kernel/time/ntp.c                    |  2 +-
 kernel/time/posix-cpu-timers.c       |  6 +++---
 kernel/time/tick-broadcast-hrtimer.c |  2 +-
 kernel/time/tick-broadcast.c         |  4 ++--
 kernel/time/tick-oneshot.c           |  2 +-
 kernel/time/tick-sched.c             |  2 +-
 kernel/time/tick-sched.h             |  2 +-
 kernel/time/time.c                   |  2 +-
 kernel/time/timekeeping.c            | 10 +++++-----
 kernel/time/timer.c                  |  4 ++--
 kernel/time/vsyscall.c               |  2 +-
 15 files changed, 34 insertions(+), 34 deletions(-)
(limited to 'kernel')

diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 98d7a15e8cf6..e9af8fae0bfb 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -2,13 +2,13 @@
 /*
  * Alarmtimer interface
  *
- * This interface provides a timer which is similarto hrtimers,
+ * This interface provides a timer which is similar to hrtimers,
  * but triggers a RTC alarm if the box is suspend.
  *
  * This interface is influenced by the Android RTC Alarm timer
  * interface.
 *
- * Copyright (C) 2010 IBM Corperation
+ * Copyright (C) 2010 IBM Corporation
 *
 * Author: John Stultz
 */
@@ -811,7 +811,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
 /**
  * alarm_timer_nsleep - alarmtimer nanosleep
  * @which_clock: clockid
- * @flags: determins abstime or relative
+ * @flags: determines abstime or relative
 * @tsreq: requested sleep time (abs or rel)
 *
 * Handles clock_nanosleep calls against _ALARM clockids
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cce484a2cc7c..1d1a61371b5a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -38,7 +38,7 @@
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
- * reduce the conversion accuracy by chosing smaller mult and shift
+ * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
 void
@@ -518,7 +518,7 @@ static void clocksource_suspend_select(bool fallback)
 * the suspend time when resuming system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
- * that means processes are freezed, non-boot cpus and interrupts are disabled
+ * that means processes are frozen, non-boot cpus and interrupts are disabled
 * now. It is therefore possible to start the suspend timer without taking the
 * clocksource mutex.
 */
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 788b9d137de4..30b356c93c78 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -683,7 +683,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 * T1 is removed, so this code is called and would reprogram
 * the hardware to 5s from now. Any hrtimer_start after that
 * will not reprogram the hardware due to hang_detected being
- * set. So we'd effectivly block all timers until the T2 event
+ * set. So we'd effectively block all timers until the T2 event
 * fires.
 */
 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
@@ -1019,7 +1019,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
 * cpu_base->next_timer. This happens when we remove the first
 * timer on a remote cpu. No harm as we never dereference
 * cpu_base->next_timer. So the worst thing what can happen is
- * an superflous call to hrtimer_force_reprogram() on the
+ * an superfluous call to hrtimer_force_reprogram() on the
 * remote cpu later on if the same timer gets enqueued again.
 */
 if (reprogram && timer == cpu_base->next_timer)
@@ -1212,7 +1212,7 @@ static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
 * The counterpart to hrtimer_cancel_wait_running().
 *
 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
- * the timer callback to finish. Drop expiry_lock and reaquire it. That
+ * the timer callback to finish. Drop expiry_lock and reacquire it. That
 * allows the waiter to acquire the lock and make progress.
 */
 static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
@@ -1398,7 +1398,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 int base;

 /*
- * On PREEMPT_RT enabled kernels hrtimers which are not explicitely
+ * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
 * marked for hard interrupt expiry mode are moved into soft
 * interrupt context for latency reasons and because the callbacks
 * can invoke functions which might sleep on RT, e.g. spin_lock().
@@ -1430,7 +1430,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
- * @mode:       The modes which are relevant for intitialization:
+ * @mode:       The modes which are relevant for initialization:
 *              HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
 *              HRTIMER_MODE_REL_SOFT
 *
@@ -1487,7 +1487,7 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
- * a false negative if the read side got smeared over multiple consequtive
+ * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */

@@ -1588,7 +1588,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
 * minimizing wakeups, not running timers at the
 * earliest interrupt after their soft expiration.
 * This allows us to avoid using a Priority Search
- * Tree, which can answer a stabbing querry for
+ * Tree, which can answer a stabbing query for
 * overlapping intervals and instead use the simple
 * BST we already have.
 * We don't add extra wakeups by delaying timers that
@@ -1822,7 +1822,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
                                   clockid_t clock_id, enum hrtimer_mode mode)
 {
 /*
- * On PREEMPT_RT enabled kernels hrtimers which are not explicitely
+ * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
 * marked for hard interrupt expiry mode are moved into soft
 * interrupt context either for latency reasons or because the
 * hrtimer callback takes regular spinlocks or invokes other
@@ -1835,7 +1835,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 * the same CPU. That causes a latency spike due to the wakeup of
 * a gazillion threads.
 *
- * OTOH, priviledged real-time user space applications rely on the
+ * OTOH, privileged real-time user space applications rely on the
 * low latency of hard interrupt wakeups. If the current task is in
 * a real-time scheduling class, mark the mode for hard interrupt
 * expiry.
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a5cffe2a1770..a492e4da69ba 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -44,7 +44,7 @@ static u64 jiffies_read(struct clocksource *cs)
 * the timer interrupt frequency HZ and it suffers
 * inaccuracies caused by missed or lost timer
 * interrupts and the inability for the timer
- * interrupt hardware to accuratly tick at the
+ * interrupt hardware to accurately tick at the
 * requested HZ value. It is also not recommended
 * for "tick-less" systems.
 */
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 5247afd7f345..406dccb79c2b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -544,7 +544,7 @@ static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
                                   struct timespec64 *to_set,
                                   const struct timespec64 *now)
 {
-        /* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. */
+        /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
         const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
         struct timespec64 delay = {.tv_sec = -1, .tv_nsec = set_offset_nsec};
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index a71758e34e45..b145e6835e34 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -279,7 +279,7 @@ void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
 * @tsk:        Task for which cputime needs to be started
 * @samples:    Storage for time samples
 *
- * The thread group cputime accouting is avoided when there are no posix
+ * The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
@@ -390,7 +390,7 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
 	/*
 	 * If posix timer expiry is handled in task work context then
 	 * timer::it_lock can be taken without disabling interrupts as all
-	 * other locking happens in task context. This requires a seperate
+	 * other locking happens in task context. This requires a separate
 	 * lock class key otherwise regular posix timer expiry would record
 	 * the lock class being taken in interrupt context and generate a
 	 * false positive warning.
@@ -1216,7 +1216,7 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
 	check_process_timers(tsk, &firing);

 	/*
-	 * The above timer checks have updated the exipry cache and
+	 * The above timer checks have updated the expiry cache and
 	 * because nothing can have queued or modified timers after
 	 * sighand lock was taken above it is guaranteed to be
 	 * consistent. So the next timer interrupt fastpath check
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index b5a65e212df2..797eb93103ad 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -53,7 +53,7 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 	 * reasons.
 	 *
 	 * Each caller tries to arm the hrtimer on its own CPU, but if the
-	 * hrtimer callbback function is currently running, then
+	 * hrtimer callback function is currently running, then
 	 * hrtimer_start() cannot move it and the timer stays on the CPU on
 	 * which it is assigned at the moment.
 	 *
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 5a23829372c7..6ec7855ab88d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -157,7 +157,7 @@ static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
 }

 /*
- * Check, if the device is disfunctional and a place holder, which
+ * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
@@ -391,7 +391,7 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
 	 *	- the broadcast device exists
 	 *	- the broadcast device is not a hrtimer based one
 	 *	- the broadcast device is in periodic mode to
-	 *	  avoid a hickup during switch to oneshot mode
+	 *	  avoid a hiccup during switch to oneshot mode
 	 */
 	if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
 	    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index f9745d47425a..475ecceda768 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -45,7 +45,7 @@ int tick_program_event(ktime_t expires, int force)
 }

 /**
- * tick_resume_onshot - resume oneshot mode
+ * tick_resume_oneshot - resume oneshot mode
 */
 void tick_resume_oneshot(void)
 {
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e10a4af88737..128735e3e77e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -751,7 +751,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 	 * Aside of that check whether the local timer softirq is
 	 * pending. If so its a bad idea to call get_next_timer_interrupt()
 	 * because there is an already expired timer, so it will request
-	 * immeditate expiry, which rearms the hardware timer with a
+	 * immediate expiry, which rearms the hardware timer with a
 	 * minimal delta which brings us back to this place
 	 * immediately. Lather, rinse and repeat...
 	 */
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index 4fb06527cf64..d952ae393423 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -29,7 +29,7 @@ enum tick_nohz_mode {
 * @inidle:        Indicator that the CPU is in the tick idle mode
 * @tick_stopped:  Indicator that the idle tick has been stopped
 * @idle_active:   Indicator that the CPU is actively in the tick idle mode;
- *                 it is resetted during irq handling phases.
+ *                 it is reset during irq handling phases.
 * @do_timer_lst:  CPU was the last one doing do_timer before going idle
 * @got_idle_tick: Tick timer function has run with @inidle set
 * @last_tick:     Store the last tick expiry time when the tick
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 3985b2b32d08..29923b20e0e4 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -571,7 +571,7 @@ EXPORT_SYMBOL(__usecs_to_jiffies);
 /*
 * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
 * that a remainder subtract here would not do the right thing as the
- * resolution values don't fall on second boundries. I.e. the line:
+ * resolution values don't fall on second boundaries. I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 * Note that due to the small error in the multiplier here, this
 * rounding is incorrect for sufficiently large values of tv_nsec, but
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6aee5768c86f..77bafd8c8df2 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -596,14 +596,14 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
 * careful cache layout of the timekeeper because the sequence count and
 * struct tk_read_base would then need two cache lines instead of one.
 *
- * Access to the time keeper clock source is disabled accross the innermost
+ * Access to the time keeper clock source is disabled across the innermost
 * steps of suspend/resume. The accessors still work, but the timestamps
 * are frozen until time keeping is resumed which happens very early.
 *
 * For regular suspend/resume there is no observable difference vs. sched
 * clock, but it might affect some of the nasty low level debug printks.
 *
- * OTOH, access to sched clock is not guaranteed accross suspend/resume on
+ * OTOH, access to sched clock is not guaranteed across suspend/resume on
 * all systems either so it depends on the hardware in use.
 *
 * If that turns out to be a real problem then this could be mitigated by
@@ -899,7 +899,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
 EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);

 /**
- * ktime_mono_to_any() - convert mononotic time to any other time
+ * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono: time to convert.
 * @offs:  which offset to use
 */
@@ -1948,7 +1948,7 @@ static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
 *	xtime_nsec_1 = offset + xtime_nsec_2
 * Which gives us:
 *	xtime_nsec_2 = xtime_nsec_1 - offset
- * Which simplfies to:
+ * Which simplifies to:
 *	xtime_nsec -= offset
 */
 	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
@@ -2336,7 +2336,7 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc)

 	/*
 	 * Validate if a timespec/timeval used to inject a time
-	 * offset is valid. Offsets can be postive or negative, so
+	 * offset is valid. Offsets can be positive or negative, so
 	 * we don't check tv_sec. The value of the timeval/timespec
 	 * is the sum of its fields,but *NOTE*:
 	 * The field tv_usec/tv_nsec must always be non-negative and
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index f475f1a027c8..d111adf4a0cb 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -894,7 +894,7 @@ static inline void forward_timer_base(struct timer_base *base)
 	/*
 	 * No need to forward if we are close enough below jiffies.
 	 * Also while executing timers, base->clk is 1 offset ahead
-	 * of jiffies to avoid endless requeuing to current jffies.
+	 * of jiffies to avoid endless requeuing to current jiffies.
 	 */
 	if ((long)(jnow - base->clk) < 1)
 		return;
@@ -1271,7 +1271,7 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
 * The counterpart to del_timer_wait_running().
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for the
- * timer callback to finish. Drop expiry_lock and reaquire it. That allows
+ * timer callback to finish. Drop expiry_lock and reacquire it. That allows
 * the waiter to acquire the lock and make progress.
 */
 static void timer_sync_wait_running(struct timer_base *base)
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 88e6b8ed6ca5..f0d5062d9cbc 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -108,7 +108,7 @@ void update_vsyscall(struct timekeeper *tk)

 	/*
 	 * If the current clocksource is not VDSO capable, then spare the
-	 * update of the high reolution parts.
+	 * update of the high resolution parts.
 	 */
 	if (clock_mode != VDSO_CLOCKMODE_NONE)
 		update_vdso_data(vdata, tk);
-- cgit
From d4c7c28806616809e3baa0b7cd8c665516b2726d Mon Sep 17 00:00:00 2001
From: Niklas Söderlund
Date: Thu, 11 Feb 2021 14:43:18 +0100
Subject: timekeeping: Allow runtime PM from change_clocksource()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The struct clocksource callbacks enable() and disable() are described
as a way to allow clock sources to enter a power save mode. See commit
4614e6adafa2 ("clocksource: add enable() and disable() callbacks")

But using runtime PM from these callbacks triggers a cyclic lockdep
warning when switching clock source using change_clocksource().

  # echo e60f0000.timer > /sys/devices/system/clocksource/clocksource0/current_clocksource
  ======================================================
  WARNING: possible circular locking dependency detected
  ------------------------------------------------------
  migration/0/11 is trying to acquire lock:
  ffff0000403ed220 (&dev->power.lock){-...}-{2:2}, at: __pm_runtime_resume+0x40/0x74

  but task is already holding lock:
  ffff8000113c8f88 (tk_core.seq.seqcount){----}-{0:0}, at: multi_cpu_stop+0xa4/0x190

  which lock already depends on the new lock.

  the existing dependency chain (in reverse order) is:

  -> #2 (tk_core.seq.seqcount){----}-{0:0}:
         ktime_get+0x28/0xa0
         hrtimer_start_range_ns+0x210/0x2dc
         generic_sched_clock_init+0x70/0x88
         sched_clock_init+0x40/0x64
         start_kernel+0x494/0x524

  -> #1 (hrtimer_bases.lock){-.-.}-{2:2}:
         hrtimer_start_range_ns+0x68/0x2dc
         rpm_suspend+0x308/0x5dc
         rpm_idle+0xc4/0x2a4
         pm_runtime_work+0x98/0xc0
         process_one_work+0x294/0x6f0
         worker_thread+0x70/0x45c
         kthread+0x154/0x160
         ret_from_fork+0x10/0x20

  -> #0 (&dev->power.lock){-...}-{2:2}:
         _raw_spin_lock_irqsave+0x7c/0xc4
         __pm_runtime_resume+0x40/0x74
         sh_cmt_start+0x1c4/0x260
         sh_cmt_clocksource_enable+0x28/0x50
         change_clocksource+0x9c/0x160
         multi_cpu_stop+0xa4/0x190
         cpu_stopper_thread+0x90/0x154
         smpboot_thread_fn+0x244/0x270
         kthread+0x154/0x160
         ret_from_fork+0x10/0x20

  other info that might help us debug this:

  Chain exists of:
    &dev->power.lock --> hrtimer_bases.lock --> tk_core.seq.seqcount

  Possible unsafe locking scenario:

        CPU0                    CPU1
        ----                    ----
   lock(tk_core.seq.seqcount);
                                lock(hrtimer_bases.lock);
                                lock(tk_core.seq.seqcount);
   lock(&dev->power.lock);

  *** DEADLOCK ***

  2 locks held by migration/0/11:
   #0: ffff8000113c9278 (timekeeper_lock){-.-.}-{2:2}, at: change_clocksource+0x2c/0x160
   #1: ffff8000113c8f88 (tk_core.seq.seqcount){----}-{0:0}, at: multi_cpu_stop+0xa4/0x190

Rework change_clocksource() so it enables the new clocksource and
disables the old clocksource outside of the timekeeper_lock and
seqcount write held region. There is no requirement that these
callbacks are invoked from the lock held region.

Signed-off-by: Niklas Söderlund
Signed-off-by: Thomas Gleixner
Tested-by: Wolfram Sang
Link: https://lore.kernel.org/r/20210211134318.323910-1-niklas.soderlund+renesas@ragnatech.se
---
 kernel/time/timekeeping.c | 36 +++++++++++++++++++++++-------------
 1 file changed, 23 insertions(+), 13 deletions(-)
(limited to 'kernel')

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 77bafd8c8df2..81fe2a33b80c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1427,35 +1427,45 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
 static int change_clocksource(void *data)
 {
         struct timekeeper *tk = &tk_core.timekeeper;
-        struct clocksource *new, *old;
+        struct clocksource *new, *old = NULL;
         unsigned long flags;
+        bool change = false;

         new = (struct clocksource *) data;

-        raw_spin_lock_irqsave(&timekeeper_lock, flags);
-        write_seqcount_begin(&tk_core.seq);
-
-        timekeeping_forward_now(tk);
         /*
          * If the cs is in module, get a module reference. Succeeds
          * for built-in code (owner == NULL) as well.
          */
         if (try_module_get(new->owner)) {
-                if (!new->enable || new->enable(new) == 0) {
-                        old = tk->tkr_mono.clock;
-                        tk_setup_internals(tk, new);
-                        if (old->disable)
-                                old->disable(old);
-                        module_put(old->owner);
-                } else {
+                if (!new->enable || new->enable(new) == 0)
+                        change = true;
+                else
                         module_put(new->owner);
-                }
         }
+
+        raw_spin_lock_irqsave(&timekeeper_lock, flags);
+        write_seqcount_begin(&tk_core.seq);
+
+        timekeeping_forward_now(tk);
+
+        if (change) {
+                old = tk->tkr_mono.clock;
+                tk_setup_internals(tk, new);
+        }
+
         timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

         write_seqcount_end(&tk_core.seq);
         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

+        if (old) {
+                if (old->disable)
+                        old->disable(old);
+
+                module_put(old->owner);
+        }
+
         return 0;
 }
-- cgit
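[ Editor's note: for readability, this is the post-patch change_clocksource()
  assembled from the hunks above -- a reconstruction from the diff, not a
  verbatim copy of the tree. The enable()/disable() callbacks and the module
  refcounting now run without timekeeper_lock or the tk_core.seq write side
  held; only the actual switch-over remains inside the locked region. ]

static int change_clocksource(void *data)
{
        struct timekeeper *tk = &tk_core.timekeeper;
        struct clocksource *new, *old = NULL;
        unsigned long flags;
        bool change = false;

        new = (struct clocksource *) data;

        /*
         * If the cs is in module, get a module reference. Succeeds
         * for built-in code (owner == NULL) as well.
         */
        if (try_module_get(new->owner)) {
                if (!new->enable || new->enable(new) == 0)
                        change = true;
                else
                        module_put(new->owner);
        }

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);

        timekeeping_forward_now(tk);

        if (change) {
                old = tk->tkr_mono.clock;
                tk_setup_internals(tk, new);
        }

        timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        /* Tear down the old clocksource with no timekeeper locks held. */
        if (old) {
                if (old->disable)
                        old->disable(old);

                module_put(old->owner);
        }

        return 0;
}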
From 07ff4aed015c564d03fd518d2fb54e5e6948903c Mon Sep 17 00:00:00 2001
From: Marc Kleine-Budde
Date: Wed, 3 Mar 2021 11:35:44 +0100
Subject: time/timecounter: Mark 1st argument of timecounter_cyc2time() as const

The timecounter is not modified in this function. Mark it as const.

Signed-off-by: Marc Kleine-Budde
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/r/20210303103544.994855-1-mkl@pengutronix.de
---
 kernel/time/timecounter.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'kernel')

diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c
index 85b98e727306..e6285288d765 100644
--- a/kernel/time/timecounter.c
+++ b/kernel/time/timecounter.c
@@ -76,7 +76,7 @@ static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
         return ns;
 }

-u64 timecounter_cyc2time(struct timecounter *tc,
+u64 timecounter_cyc2time(const struct timecounter *tc,
                          u64 cycle_tstamp)
 {
         u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
-- cgit
From d7840aaadd6e84915866a8f0dab586f6107dadf1 Mon Sep 17 00:00:00 2001
From: Wang Wensheng
Date: Fri, 26 Mar 2021 02:23:28 +0000
Subject: tick: Use tick_check_replacement() instead of open coding it

The function tick_check_replacement() is the combination of
tick_check_percpu() and tick_check_preferred(), but
tick_check_new_device() has the same logic open coded.

Use the helper to simplify the code.

[ tglx: Massage changelog ]

Signed-off-by: Wang Wensheng
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/r/20210326022328.3266-1-wangwensheng4@huawei.com
---
 kernel/time/tick-common.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)
(limited to 'kernel')

diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 9d3a22510bab..e15bc0ef1912 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -348,12 +348,7 @@ void tick_check_new_device(struct clock_event_device *newdev)
         td = &per_cpu(tick_cpu_device, cpu);
         curdev = td->evtdev;

-        /* cpu local device ? */
-        if (!tick_check_percpu(curdev, newdev, cpu))
-                goto out_bc;
-
-        /* Preference decision */
-        if (!tick_check_preferred(curdev, newdev))
+        if (!tick_check_replacement(curdev, newdev))
                 goto out_bc;

         if (!try_module_get(newdev->owner))
-- cgit
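[ Editor's note: the substituted helper combines the two open-coded checks.
  In kernel/time/tick-common.c it reads roughly as below -- quoted from
  memory, so treat the exact body as a sketch; the changelog only guarantees
  that it is the combination of the two predicates. ]

bool tick_check_replacement(struct clock_event_device *curdev,
                            struct clock_event_device *newdev)
{
        /* cpu local device ? */
        if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
                return false;

        /* Preference decision */
        return tick_check_preferred(curdev, newdev);
}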
From 9c336c9935cff267470bb3aaa85c66fac194b650 Mon Sep 17 00:00:00 2001
From: Jindong Yue
Date: Wed, 31 Mar 2021 16:33:18 +0800
Subject: tick/broadcast: Allow late registered device to enter oneshot mode

The broadcast device is switched to oneshot mode when the system switches
to oneshot mode. If a broadcast clock event device is registered after the
system switched to oneshot mode, it will stay in periodic mode forever.

Ensure that a late registered device which is selected as broadcast device
is initialized in oneshot mode when the system already uses oneshot mode.

[ tglx: Massage changelog ]

Signed-off-by: Jindong Yue
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/r/20210331083318.21794-1-jindong.yue@nxp.com
---
 kernel/time/tick-broadcast.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)
(limited to 'kernel')

diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 6ec7855ab88d..a44055228796 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -107,6 +107,19 @@ void tick_install_broadcast_device(struct clock_event_device *dev)
         tick_broadcast_device.evtdev = dev;
         if (!cpumask_empty(tick_broadcast_mask))
                 tick_broadcast_start_periodic(dev);
+
+        if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+                return;
+
+        /*
+         * If the system already runs in oneshot mode, switch the newly
+         * registered broadcast device to oneshot mode explicitly.
+         */
+        if (tick_broadcast_oneshot_active()) {
+                tick_broadcast_switch_to_oneshot();
+                return;
+        }
+
         /*
          * Inform all cpus about this. We might be in a situation
          * where we did not switch to oneshot mode because the per cpu
@@ -115,8 +128,7 @@ void tick_install_broadcast_device(struct clock_event_device *dev)
          * notification the systems stays stuck in periodic mode
          * forever.
          */
-        if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
-                tick_clock_notify();
+        tick_clock_notify();
 }
-- cgit
From 2d036dfa5f10df9782f5278fc591d79d283c1fad Mon Sep 17 00:00:00 2001
From: Chen Jun
Date: Wed, 14 Apr 2021 03:04:49 +0000
Subject: posix-timers: Preserve return value in clock_adjtime32()

The return value on success (>= 0) is overwritten by the return value of
put_old_timex32(). That works correct in the fault case, but is wrong for
the success case where put_old_timex32() returns 0.

Just check the return value of put_old_timex32() and return -EFAULT in
case it is not zero.

[ tglx: Massage changelog ]

Fixes: 3a4d44b61625 ("ntp: Move adjtimex related compat syscalls to native counterparts")
Signed-off-by: Chen Jun
Signed-off-by: Thomas Gleixner
Reviewed-by: Richard Cochran
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20210414030449.90692-1-chenjun102@huawei.com
---
 kernel/time/posix-timers.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'kernel')

diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index bf540f5a4115..dd5697d7347b 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -1191,8 +1191,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,

         err = do_clock_adjtime(which_clock, &ktx);

-        if (err >= 0)
-                err = put_old_timex32(utp, &ktx);
+        if (err >= 0 && put_old_timex32(utp, &ktx))
+                return -EFAULT;

         return err;
 }
-- cgit
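[ Editor's note: the preserved value matters because, like adjtimex(), a
  successful do_clock_adjtime() returns the clock state (TIME_OK, TIME_INS,
  ...), not plain 0. A self-contained illustration of the bug pattern with
  stand-in functions -- hypothetical code, not from the kernel: ]

#include <stdio.h>

static int do_adjtime(void) { return 1; }  /* success with state, e.g. TIME_INS */
static int copy_out(void)   { return 0; }  /* 0 on success, nonzero on fault */

int main(void)
{
        int err;

        /* Buggy shape: copy_out()'s 0 overwrites the meaningful result. */
        err = do_adjtime();
        if (err >= 0)
                err = copy_out();
        printf("buggy: %d\n", err);     /* prints 0 -- clock state lost */

        /* Fixed shape: the copy-out only matters when it fails. */
        err = do_adjtime();
        if (err >= 0 && copy_out())
                err = -1;               /* -EFAULT in the kernel */
        printf("fixed: %d\n", err);     /* prints 1 -- state preserved */

        return 0;
}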