Diffstat (limited to 'lib/vdso/gettimeofday.c')
-rw-r--r--  lib/vdso/gettimeofday.c | 153
1 file changed, 100 insertions(+), 53 deletions(-)
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index f8b8ec5e63ac..a2909af4b924 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -2,30 +2,9 @@
/*
* Generic userspace implementations of gettimeofday() and similar.
*/
-#include <linux/compiler.h>
-#include <linux/math64.h>
-#include <linux/time.h>
-#include <linux/kernel.h>
-#include <linux/hrtimer_defs.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
-/*
- * The generic vDSO implementation requires that gettimeofday.h
- * provides:
- * - __arch_get_vdso_data(): to get the vdso datapage.
- * - __arch_get_hw_counter(): to get the hw counter based on the
- * clock_mode.
- * - gettimeofday_fallback(): fallback for gettimeofday.
- * - clock_gettime_fallback(): fallback for clock_gettime.
- * - clock_getres_fallback(): fallback for clock_getres.
- */
-#ifdef ENABLE_COMPAT_VDSO
-#include <asm/vdso/compat_gettimeofday.h>
-#else
-#include <asm/vdso/gettimeofday.h>
-#endif /* ENABLE_COMPAT_VDSO */
-
#ifndef vdso_calc_delta
/*
* Default implementation which works for all sane clocksources. That
@@ -38,6 +17,27 @@ u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
}
#endif
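
For reference, the default vdso_calc_delta() whose closing brace shows in the context above is the straightforward masked multiply; in the generic file it reads, in essence:

    static __always_inline u64 vdso_calc_delta(u64 cycles, u64 last,
                                               u64 mask, u32 mult)
    {
        return ((cycles - last) & mask) * mult;
    }

An architecture whose clocksource needs the scaling done differently defines vdso_calc_delta itself before this point, and the #ifndef compiles the default out.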
+#ifndef vdso_shift_ns
+static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
+{
+ return ns >> shift;
+}
+#endif
+
+#ifndef __arch_vdso_hres_capable
+static inline bool __arch_vdso_hres_capable(void)
+{
+ return true;
+}
+#endif
+
+#ifndef vdso_clocksource_ok
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return vd->clock_mode != VDSO_CLOCKMODE_NONE;
+}
+#endif
+
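
These three new #ifndef fallbacks follow the usual vDSO override pattern: an architecture's asm/vdso/gettimeofday.h supplies its own inline and defines a same-named macro so the generic default compiles out. A sketch of what an override can look like, with a made-up config symbol (the hook name matches the code above; the CONFIG_ option is illustrative):

    /* In a hypothetical asm/vdso/gettimeofday.h */
    static inline bool my_arch_vdso_hres_capable(void)
    {
        /* Only claim hres capability when the arch timer is built in. */
        return IS_ENABLED(CONFIG_MY_ARCH_TIMER);    /* illustrative symbol */
    }
    #define __arch_vdso_hres_capable my_arch_vdso_hres_capable

With such an override in place, do_hres() below collapses to "return -1" at compile time on configurations without the timer, and the syscall fallback handles the high resolution clocks.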
#ifdef CONFIG_TIME_NS
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
@@ -57,14 +57,15 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
do {
seq = vdso_read_begin(vd);
+
+ if (unlikely(!vdso_clocksource_ok(vd)))
+ return -1;
+
cycles = __arch_get_hw_counter(vd->clock_mode);
ns = vdso_ts->nsec;
last = vd->cycle_last;
- if (unlikely((s64)cycles < 0))
- return -1;
-
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns >>= vd->shift;
+ ns = vdso_shift_ns(ns, vd->shift);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
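
Both the timens and plain paths bracket their datapage reads with the seqcount helpers from vdso/helpers.h, which look roughly like this (condensed):

    static __always_inline u32 vdso_read_begin(const struct vdso_data *vd)
    {
        u32 seq;

        /* Wait out a concurrent update: an odd sequence count means
         * the kernel is mid-update. */
        while (unlikely((seq = READ_ONCE(vd->seq)) & 1))
            cpu_relax();

        smp_rmb();    /* Order the seq read before the data reads. */
        return seq;
    }

    static __always_inline u32 vdso_read_retry(const struct vdso_data *vd,
                                               u32 start)
    {
        smp_rmb();    /* Order the data reads before the seq re-read. */
        return READ_ONCE(vd->seq) != start;
    }

do_hres_timens() can call vdso_read_begin() directly because it first switches to the timens-mapped real datapage, whose sequence count behaves normally; the plain path below has to open code the wait, as its comment explains.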
@@ -101,12 +102,16 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
u64 cycles, last, sec, ns;
u32 seq;
+ /* Allows the high resolution parts to be compiled out */
+ if (!__arch_vdso_hres_capable())
+ return -1;
+
do {
/*
- * Open coded to handle VCLOCK_TIMENS. Time namespace
+ * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
* enabled tasks have a special VVAR page installed which
* has vd->seq set to 1 and vd->clock_mode set to
- * VCLOCK_TIMENS. For non time namespace affected tasks
+ * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
* this does not affect performance because if vd->seq is
* odd, i.e. a concurrent update is in progress the extra
* check for vd->clock_mode is just a few extra
@@ -115,20 +120,20 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
*/
while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VCLOCK_TIMENS)
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
return do_hres_timens(vd, clk, ts);
cpu_relax();
}
smp_rmb();
+ if (unlikely(!vdso_clocksource_ok(vd)))
+ return -1;
+
cycles = __arch_get_hw_counter(vd->clock_mode);
ns = vdso_ts->nsec;
last = vd->cycle_last;
- if (unlikely((s64)cycles < 0))
- return -1;
-
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns >>= vd->shift;
+ ns = vdso_shift_ns(ns, vd->shift);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
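
The conversion both loops perform, ns = (ns + delta * mult) >> shift, is plain fixed-point arithmetic. A standalone toy with made-up constants shows the shape of it (mult and shift are invented here; the real values come from the kernel's timekeeping core):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical 10 MHz counter: 100 ns per cycle, with 8 bits
         * of fixed-point precision folded into mult/shift. */
        uint64_t cycles = 1000700, last = 1000000, mask = ~0ULL;
        uint32_t mult = 100 << 8, shift = 8;
        uint64_t ns = ((cycles - last) & mask) * mult >> shift;

        printf("%llu\n", (unsigned long long)ns);  /* 700 cycles -> 70000 ns */
        return 0;
    }

The vdso_shift_ns() indirection introduced above exists so an architecture can substitute its own implementation of that final shift.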
@@ -187,12 +192,12 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
do {
/*
- * Open coded to handle VCLOCK_TIMENS. See comment in
+ * Open coded to handle VDSO_CLOCKMODE_TIMENS. See comment in
* do_hres().
*/
while ((seq = READ_ONCE(vd->seq)) & 1) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VCLOCK_TIMENS)
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
return do_coarse_timens(vd, clk, ts);
cpu_relax();
}
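
For contrast, do_coarse(), whose loop head this hunk touches, never reads the hardware counter; once the sequence check passes it just copies the tick-granular values out of the datapage. The unmodified remainder of its loop is essentially:

    ts->tv_sec  = vdso_ts->sec;
    ts->tv_nsec = vdso_ts->nsec;

followed by the same vdso_read_retry() termination as in do_hres().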
@@ -206,9 +211,9 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
}
static __maybe_unused int
-__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *ts)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
u32 msk;
/* Check for negative values or invalid clocks */
@@ -233,23 +238,31 @@ __cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
}
static __maybe_unused int
-__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *ts)
{
- int ret = __cvdso_clock_gettime_common(clock, ts);
+ int ret = __cvdso_clock_gettime_common(vd, clock, ts);
if (unlikely(ret))
return clock_gettime_fallback(clock, ts);
return 0;
}
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
+}
+
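
The point of the new *_data() variants is to let the caller obtain the vdso_data pointer once, in whatever way is cheapest for the architecture, instead of having the common code call __arch_get_vdso_data() itself. A hypothetical arch-side entry point (name and argument order are illustrative, not any particular port's ABI):

    /* The asm trampoline computes the datapage address, e.g.
     * PC-relative, and hands it down. */
    int my_arch_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
                              const struct vdso_data *vd)
    {
        return __cvdso_clock_gettime_data(vd, clock, ts);
    }

The old __cvdso_clock_gettime() signature is kept as a thin wrapper, so existing ports keep working unchanged.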
#ifdef BUILD_VDSO32
static __maybe_unused int
-__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
+__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
+ struct old_timespec32 *res)
{
struct __kernel_timespec ts;
int ret;
- ret = __cvdso_clock_gettime_common(clock, &ts);
+ ret = __cvdso_clock_gettime_common(vd, clock, &ts);
if (unlikely(ret))
return clock_gettime32_fallback(clock, res);
@@ -260,12 +273,18 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
return ret;
}
+
+static __maybe_unused int
+__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
+{
+ return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
+}
#endif /* BUILD_VDSO32 */
static __maybe_unused int
-__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+__cvdso_gettimeofday_data(const struct vdso_data *vd,
+ struct __kernel_old_timeval *tv, struct timezone *tz)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
if (likely(tv != NULL)) {
struct __kernel_timespec ts;
@@ -279,7 +298,7 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
if (unlikely(tz != NULL)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
- vd->clock_mode == VCLOCK_TIMENS)
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data();
tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
@@ -289,13 +308,20 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
return 0;
}
+static __maybe_unused int
+__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
+}
+
#ifdef VDSO_HAS_TIME
-static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
+static __maybe_unused __kernel_old_time_t
+__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
__kernel_old_time_t t;
- if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data();
t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
@@ -305,13 +331,18 @@ static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time
return t;
}
+
+static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
+{
+ return __cvdso_time_data(__arch_get_vdso_data(), time);
+}
#endif /* VDSO_HAS_TIME */
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
-int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
u32 msk;
u64 ns;
@@ -319,7 +350,8 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
if (unlikely((u32) clock >= MAX_CLOCKS))
return -1;
- if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data();
/*
@@ -349,23 +381,31 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
}
static __maybe_unused
-int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
{
- int ret = __cvdso_clock_getres_common(clock, res);
+ int ret = __cvdso_clock_getres_common(vd, clock, res);
if (unlikely(ret))
return clock_getres_fallback(clock, res);
return 0;
}
+static __maybe_unused
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
+}
+
#ifdef BUILD_VDSO32
static __maybe_unused int
-__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
+__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
+ struct old_timespec32 *res)
{
struct __kernel_timespec ts;
int ret;
- ret = __cvdso_clock_getres_common(clock, &ts);
+ ret = __cvdso_clock_getres_common(vd, clock, &ts);
if (unlikely(ret))
return clock_getres32_fallback(clock, res);
@@ -376,5 +416,12 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
}
return ret;
}
+
+static __maybe_unused int
+__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
+{
+ return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
+ clock, res);
+}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */
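
From userspace none of this refactoring is visible: libc typically binds clock_gettime(), gettimeofday() and friends to the vDSO symbols built from these helpers, so the fast path completes without a syscall. A minimal consumer:

    #include <stdio.h>
    #include <sys/time.h>
    #include <time.h>

    int main(void)
    {
        struct timespec ts;
        struct timeval tv;

        /* Both calls normally resolve through the vDSO. */
        clock_gettime(CLOCK_MONOTONIC, &ts);
        gettimeofday(&tv, NULL);
        printf("mono %lld.%09ld, wall %lld.%06ld\n",
               (long long)ts.tv_sec, ts.tv_nsec,
               (long long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
    }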