author	Adrian Hunter <adrian.hunter@intel.com>	2024-03-25 08:40:11 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2024-04-08 15:03:07 +0200
commit	456e3788bc7164c1c8298045e04068b8e3d8e413 (patch)
tree	09efbe2666a269c3da3a88e9dba4ff3498b30a23 /lib/vdso/gettimeofday.c
parent	d2e58ab5cda2a225c406ac10d0a8b960bc5a39b6 (diff)
vdso: Make delta calculation overflow safe
Kernel timekeeping is designed to keep the change in cycles (since the
last timer interrupt) below max_cycles, which prevents multiplication
overflow when converting cycles to nanoseconds. However, if timer
interrupts stop, the calculation will eventually overflow.

Add protection against that, enabled by the config option
CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT. Check against max_cycles, falling
back to a slower, higher-precision calculation.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-8-adrian.hunter@intel.com
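The overflow being guarded against is easy to reproduce in isolation. Below is a
minimal userspace sketch of the two paths: the fast path multiplies delta by mult
in 64 bits, which silently wraps once the product exceeds 2^64, while the fallback
widens the multiplication to 128 bits, in the spirit of the kernel's
mul_u64_u32_add_u64_shr() helper in include/linux/math64.h. The _demo names are
hypothetical stand-ins rather than kernel API, and the mult/shift values are
illustrative, not taken from a real clocksource:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Model of (a * mul + b) >> shift computed in 128 bits, so that
	 * a * mul cannot wrap; assumes a compiler providing __int128.
	 */
	static uint64_t mul_u64_u32_add_u64_shr_demo(uint64_t a, uint32_t mul,
						     uint64_t b, unsigned int shift)
	{
		return (uint64_t)((((unsigned __int128)a * mul) + b) >> shift);
	}

	int main(void)
	{
		uint32_t mult = 1U << 26;	/* illustrative conversion factor */
		unsigned int shift = 26;
		uint64_t delta = 1ULL << 40;	/* cycle delta after ticks stopped */

		/* Fast path: 2^40 * 2^26 = 2^66 wraps to 0 in 64 bits. */
		printf("unsafe: %llu\n",
		       (unsigned long long)((delta * mult) >> shift));

		/* Wide path: the full 2^66 survives; >> 26 yields 2^40 ns. */
		printf("safe:   %llu\n", (unsigned long long)
		       mul_u64_u32_add_u64_shr_demo(delta, mult, 0, shift));
		return 0;
	}

For deltas below the designed bound, both paths agree; the branch only matters
once delta has grown past what mult/shift were sized for.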
Diffstat (limited to 'lib/vdso/gettimeofday.c')
 lib/vdso/gettimeofday.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 9fa90e0794c9..9c3a8d2440c9 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -13,6 +13,18 @@
 # define VDSO_DELTA_MASK(vd)	(vd->mask)
 #endif
 
+#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
+static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+{
+	return delta < vd->max_cycles;
+}
+#else
+static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+{
+	return true;
+}
+#endif
+
 #ifndef vdso_shift_ns
 static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
 {
@@ -28,7 +40,10 @@ static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles,
 {
 	u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
 
-	return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
+	if (likely(vdso_delta_ok(vd, delta)))
+		return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
+
+	return mul_u64_u32_add_u64_shr(delta, vd->mult, base, vd->shift);
 }
 #endif /* vdso_calc_ns */
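Design-wise, the likely() hint keeps the hot path identical to the old code: a
single 64-bit multiply, add, and shift. The higher-precision fallback is reached
only once delta reaches max_cycles, which per the commit message can only happen
after timer interrupts stop. And because the variant of vdso_delta_ok() compiled
without CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT is a constant true, the compiler can
fold the branch away and discard the slow path entirely, so the protection costs
nothing when the option is disabled.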