Diffstat (limited to 'arch/s390/kernel/time.c')
-rw-r--r-- | arch/s390/kernel/time.c | 121
1 file changed, 5 insertions, 116 deletions
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index fed17d407a44..63517b85f4c9 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -69,8 +69,6 @@ unsigned char ptff_function_mask[16];
 
 static unsigned long lpar_offset;
 static unsigned long initial_leap_seconds;
-static unsigned long tod_steering_end;
-static long tod_steering_delta;
 
 /*
  * Get time offsets with PTFF
@@ -80,9 +78,7 @@ void __init time_early_init(void)
 	struct ptff_qto qto;
 	struct ptff_qui qui;
 
-	/* Initialize TOD steering parameters */
-	tod_steering_end = tod_clock_base.tod;
-	vdso_k_time_data->arch_data.tod_steering_end = tod_steering_end;
+	vdso_k_time_data->arch_data.tod_delta = tod_clock_base.tod;
 
 	if (!test_facility(28))
 		return;
@@ -226,21 +222,7 @@ void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
 
 static u64 read_tod_clock(struct clocksource *cs)
 {
-	unsigned long now, adj;
-
-	preempt_disable(); /* protect from changes to steering parameters */
-	now = get_tod_clock();
-	adj = tod_steering_end - now;
-	if (unlikely((s64) adj > 0))
-		/*
-		 * manually steer by 1 cycle every 2^16 cycles. This
-		 * corresponds to shifting the tod delta by 15. 1s is
-		 * therefore steered in ~9h. The adjust will decrease
-		 * over time, until it finally reaches 0.
-		 */
-		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
-	preempt_enable();
-	return now;
+	return get_tod_clock_monotonic();
 }
 
 static struct clocksource clocksource_tod = {
@@ -369,26 +351,11 @@ static inline int check_sync_clock(void)
  */
 static void clock_sync_global(long delta)
 {
-	unsigned long now, adj;
 	struct ptff_qto qto;
 
 	/* Fixup the monotonic sched clock. */
 	tod_clock_base.eitod += delta;
-	/* Adjust TOD steering parameters. */
-	now = get_tod_clock();
-	adj = tod_steering_end - now;
-	if (unlikely((s64) adj >= 0))
-		/* Calculate how much of the old adjustment is left. */
-		tod_steering_delta = (tod_steering_delta < 0) ?
-			-(adj >> 15) : (adj >> 15);
-	tod_steering_delta += delta;
-	if ((abs(tod_steering_delta) >> 48) != 0)
-		panic("TOD clock sync offset %li is too large to drift\n",
-		      tod_steering_delta);
-	tod_steering_end = now + (abs(tod_steering_delta) << 15);
-	vdso_k_time_data->arch_data.tod_steering_end = tod_steering_end;
-	vdso_k_time_data->arch_data.tod_steering_delta = tod_steering_delta;
-
+	vdso_k_time_data->arch_data.tod_delta = tod_clock_base.tod;
 	/* Update LPAR offset. */
 	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
 		lpar_offset = qto.tod_epoch_difference;
@@ -430,7 +397,7 @@ struct clock_sync_data {
 /*
  * Server Time Protocol (STP) code.
  */
-static bool stp_online;
+static bool stp_online = true;
 static struct stp_sstpi stp_info;
 static void *stp_page;
 
@@ -456,7 +423,6 @@ static void __init stp_reset(void)
 	if (rc == 0)
 		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
 	else if (stp_online) {
-		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
 		free_page((unsigned long) stp_page);
 		stp_page = NULL;
 		stp_online = false;
@@ -580,7 +546,7 @@ static int stp_sync_clock(void *data)
 		atomic_dec(&sync->cpus);
 		/* Wait for in_sync to be set. */
 		while (READ_ONCE(sync->in_sync) == 0)
-			__udelay(1);
+			;
 	}
 	if (sync->in_sync != 1)
 		/* Didn't work. Clear per-cpu in sync bit again. */
@@ -591,81 +557,6 @@ static int stp_sync_clock(void *data)
 	return 0;
 }
 
-static int stp_clear_leap(void)
-{
-	struct __kernel_timex txc;
-	int ret;
-
-	memset(&txc, 0, sizeof(txc));
-
-	ret = do_adjtimex(&txc);
-	if (ret < 0)
-		return ret;
-
-	txc.modes = ADJ_STATUS;
-	txc.status &= ~(STA_INS|STA_DEL);
-	return do_adjtimex(&txc);
-}
-
-static void stp_check_leap(void)
-{
-	struct stp_stzi stzi;
-	struct stp_lsoib *lsoib = &stzi.lsoib;
-	struct __kernel_timex txc;
-	int64_t timediff;
-	int leapdiff, ret;
-
-	if (!stp_info.lu || !check_sync_clock()) {
-		/*
-		 * Either a scheduled leap second was removed by the operator,
-		 * or STP is out of sync. In both cases, clear the leap second
-		 * kernel flags.
-		 */
-		if (stp_clear_leap() < 0)
-			pr_err("failed to clear leap second flags\n");
-		return;
-	}
-
-	if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) {
-		pr_err("stzi failed\n");
-		return;
-	}
-
-	timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC;
-	leapdiff = lsoib->nlso - lsoib->also;
-
-	if (leapdiff != 1 && leapdiff != -1) {
-		pr_err("Cannot schedule %d leap seconds\n", leapdiff);
-		return;
-	}
-
-	if (timediff < 0) {
-		if (stp_clear_leap() < 0)
-			pr_err("failed to clear leap second flags\n");
-	} else if (timediff < 7200) {
-		memset(&txc, 0, sizeof(txc));
-		ret = do_adjtimex(&txc);
-		if (ret < 0)
-			return;
-
-		txc.modes = ADJ_STATUS;
-		if (leapdiff > 0)
-			txc.status |= STA_INS;
-		else
-			txc.status |= STA_DEL;
-		ret = do_adjtimex(&txc);
-		if (ret < 0)
-			pr_err("failed to set leap second flags\n");
-		/* arm Timer to clear leap second flags */
-		mod_timer(&stp_timer, jiffies + secs_to_jiffies(14400));
-	} else {
-		/* The day the leap second is scheduled for hasn't been reached. Retry
-		 * in one hour.
-		 */
-		mod_timer(&stp_timer, jiffies + secs_to_jiffies(3600));
-	}
-}
-
 /*
  * STP work. Check for the STP state and take over the clock
  * synchronization if the STP clock source is usable.
@@ -707,8 +598,6 @@ static void stp_work_fn(struct work_struct *work)
 		 * Retry after a second.
 		 */
 		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
-	else if (stp_info.lu)
-		stp_check_leap();
 
 out_unlock:
 	mutex_unlock(&stp_mutex);
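
For context on the removed steering path: the deleted read_tod_clock() smeared a sync offset by nudging each clock read by adj >> 15, with the window end computed in clock_sync_global() as now + (abs(delta) << 15); the deleted comment puts this at roughly 9 hours per steered second, and the patch replaces it with a plain monotonic read plus a tod_delta exported to the vDSO. The following is a standalone userspace sketch of that removed arithmetic only, not kernel code; the sample delta and start value in main() are made up for the demonstration.

/*
 * Standalone illustration (not kernel code) of the steering arithmetic
 * removed from read_tod_clock() and clock_sync_global() by this patch.
 * The sample offset below is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long u64;
typedef long long s64;

/* Mirrors the adjustment the deleted read_tod_clock() applied. */
static u64 steered_read(u64 now, u64 steering_end, s64 steering_delta)
{
	u64 adj = steering_end - now;

	if ((s64) adj > 0)	/* still inside the steering window */
		now += (steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
	return now;
}

int main(void)
{
	s64 delta = 1000;	/* hypothetical sync offset in TOD units */
	u64 start = 0;
	/* Window end, as the deleted clock_sync_global() computed it. */
	u64 end = start + ((u64) llabs(delta) << 15);
	u64 t;

	/* The visible correction shrinks linearly until the window closes. */
	for (t = start; t <= end; t += end / 4)
		printf("now=%llu correction=%lld\n", t,
		       (s64) (steered_read(t, end, delta) - t));
	return 0;
}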