Diffstat (limited to 'include/vdso')
| -rw-r--r-- | include/vdso/align.h        |  15 |
| -rw-r--r-- | include/vdso/auxclock.h     |  13 |
| -rw-r--r-- | include/vdso/bits.h         |  10 |
| -rw-r--r-- | include/vdso/cache.h        |  15 |
| -rw-r--r-- | include/vdso/clocksource.h  |  22 |
| -rw-r--r-- | include/vdso/const.h        |  10 |
| -rw-r--r-- | include/vdso/datapage.h     | 221 |
| -rw-r--r-- | include/vdso/getrandom.h    |  74 |
| -rw-r--r-- | include/vdso/gettime.h      |  24 |
| -rw-r--r-- | include/vdso/helpers.h      |  87 |
| -rw-r--r-- | include/vdso/jiffies.h      |  11 |
| -rw-r--r-- | include/vdso/ktime.h        |  16 |
| -rw-r--r-- | include/vdso/limits.h       |  19 |
| -rw-r--r-- | include/vdso/math64.h       |  62 |
| -rw-r--r-- | include/vdso/page.h         |  31 |
| -rw-r--r-- | include/vdso/processor.h    |  14 |
| -rw-r--r-- | include/vdso/time.h         |  12 |
| -rw-r--r-- | include/vdso/time32.h       |  17 |
| -rw-r--r-- | include/vdso/time64.h       |  15 |
| -rw-r--r-- | include/vdso/unaligned.h    |  15 |
| -rw-r--r-- | include/vdso/vsyscall.h     |  14 |
21 files changed, 717 insertions, 0 deletions
diff --git a/include/vdso/align.h b/include/vdso/align.h
new file mode 100644
index 000000000000..02dd8626b5c5
--- /dev/null
+++ b/include/vdso/align.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_ALIGN_H
+#define __VDSO_ALIGN_H
+
+#include <vdso/const.h>
+
+/* @a is a power of 2 value */
+#define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))
+#define __ALIGN_MASK(x, mask)	__ALIGN_KERNEL_MASK((x), (mask))
+#define PTR_ALIGN(p, a)		((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define PTR_ALIGN_DOWN(p, a)	((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
+#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)
+
+#endif	/* __VDSO_ALIGN_H */
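Worked example (illustrative only, not part of the patch): assuming a power-of-two alignment a = 0x1000, the macros above evaluate as follows.

    ALIGN(0x1234, 0x1000)        /* -> 0x2000, rounds up to the next 4 KiB boundary  */
    ALIGN_DOWN(0x1234, 0x1000)   /* -> 0x1000, rounds down to the previous boundary  */
    IS_ALIGNED(0x2000, 0x1000)   /* -> true                                          */
    IS_ALIGNED(0x1234, 0x1000)   /* -> false                                         */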
diff --git a/include/vdso/auxclock.h b/include/vdso/auxclock.h
new file mode 100644
index 000000000000..6d6e74cbc400
--- /dev/null
+++ b/include/vdso/auxclock.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VDSO_AUXCLOCK_H
+#define _VDSO_AUXCLOCK_H
+
+#include <uapi/linux/time.h>
+#include <uapi/linux/types.h>
+
+static __always_inline u64 aux_clock_resolution_ns(void)
+{
+	return 1;
+}
+
+#endif /* _VDSO_AUXCLOCK_H */
diff --git a/include/vdso/bits.h b/include/vdso/bits.h
new file mode 100644
index 000000000000..388b212088ea
--- /dev/null
+++ b/include/vdso/bits.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_BITS_H
+#define __VDSO_BITS_H
+
+#include <vdso/const.h>
+
+#define BIT(nr)		(UL(1) << (nr))
+#define BIT_ULL(nr)	(ULL(1) << (nr))
+
+#endif	/* __VDSO_BITS_H */
diff --git a/include/vdso/cache.h b/include/vdso/cache.h
new file mode 100644
index 000000000000..f89d48304bf8
--- /dev/null
+++ b/include/vdso/cache.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_CACHE_H
+#define __VDSO_CACHE_H
+
+#include <asm/cache.h>
+
+#ifndef SMP_CACHE_BYTES
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#endif
+
+#ifndef ____cacheline_aligned
+#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+#endif
+
+#endif	/* __VDSO_CACHE_H */
diff --git a/include/vdso/clocksource.h b/include/vdso/clocksource.h
new file mode 100644
index 000000000000..c682e7c60273
--- /dev/null
+++ b/include/vdso/clocksource.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_CLOCKSOURCE_H
+#define __VDSO_CLOCKSOURCE_H
+
+#include <vdso/limits.h>
+
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+#include <asm/vdso/clocksource.h>
+#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
+
+enum vdso_clock_mode {
+	VDSO_CLOCKMODE_NONE,
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+	VDSO_ARCH_CLOCKMODES,
+#endif
+	VDSO_CLOCKMODE_MAX,
+
+	/* Indicator for time namespace VDSO */
+	VDSO_CLOCKMODE_TIMENS	= INT_MAX
+};
+
+#endif /* __VDSO_CLOCKSOURCE_H */
diff --git a/include/vdso/const.h b/include/vdso/const.h
new file mode 100644
index 000000000000..94b385ad438d
--- /dev/null
+++ b/include/vdso/const.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_CONST_H
+#define __VDSO_CONST_H
+
+#include <uapi/linux/const.h>
+
+#define UL(x)		(_UL(x))
+#define ULL(x)		(_ULL(x))
+
+#endif /* __VDSO_CONST_H */
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
new file mode 100644
index 000000000000..23c39b96190f
--- /dev/null
+++ b/include/vdso/datapage.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_DATAPAGE_H
+#define __VDSO_DATAPAGE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <uapi/linux/bits.h>
+#include <uapi/linux/time.h>
+#include <uapi/linux/types.h>
+#include <uapi/asm-generic/errno-base.h>
+
+#include <vdso/align.h>
+#include <vdso/bits.h>
+#include <vdso/cache.h>
+#include <vdso/clocksource.h>
+#include <vdso/ktime.h>
+#include <vdso/limits.h>
+#include <vdso/math64.h>
+#include <vdso/page.h>
+#include <vdso/processor.h>
+#include <vdso/time.h>
+#include <vdso/time32.h>
+#include <vdso/time64.h>
+
+#ifdef CONFIG_ARCH_HAS_VDSO_TIME_DATA
+#include <asm/vdso/time_data.h>
+#else
+struct arch_vdso_time_data {};
+#endif
+
+#if defined(CONFIG_ARCH_HAS_VDSO_ARCH_DATA)
+#include <asm/vdso/arch_data.h>
+#else
+struct vdso_arch_data {
+	/* Needed for the generic code, never actually used at runtime */
+	char __unused;
+};
+#endif
+
+#define VDSO_BASES	(CLOCK_TAI + 1)
+#define VDSO_BASE_AUX	0
+#define VDSO_HRES	(BIT(CLOCK_REALTIME)		| \
+			 BIT(CLOCK_MONOTONIC)		| \
+			 BIT(CLOCK_BOOTTIME)		| \
+			 BIT(CLOCK_TAI))
+#define VDSO_COARSE	(BIT(CLOCK_REALTIME_COARSE)	| \
+			 BIT(CLOCK_MONOTONIC_COARSE))
+#define VDSO_RAW	(BIT(CLOCK_MONOTONIC_RAW))
+#define VDSO_AUX	__GENMASK(CLOCK_AUX_LAST, CLOCK_AUX)
+
+#define CS_HRES_COARSE	0
+#define CS_RAW		1
+#define CS_BASES	(CS_RAW + 1)
+
+/**
+ * struct vdso_timestamp - basetime per clock_id
+ * @sec:	seconds
+ * @nsec:	nanoseconds
+ *
+ * There is one vdso_timestamp object in vvar for each vDSO-accelerated
+ * clock_id. For high-resolution clocks, this encodes the time
+ * corresponding to vdso_time_data.cycle_last. For coarse clocks this encodes
+ * the actual time.
+ *
+ * To be noticed that for highres clocks nsec is left-shifted by
+ * vdso_time_data[x].shift.
+ */
+struct vdso_timestamp {
+	u64	sec;
+	u64	nsec;
+};
+
+/**
+ * struct vdso_clock - vdso per clocksource datapage representation
+ * @seq:		timebase sequence counter
+ * @clock_mode:		clock mode
+ * @cycle_last:		timebase at clocksource init
+ * @max_cycles:		maximum cycles which won't overflow 64bit multiplication
+ * @mask:		clocksource mask
+ * @mult:		clocksource multiplier
+ * @shift:		clocksource shift
+ * @basetime[clock_id]:	basetime per clock_id
+ * @offset[clock_id]:	time namespace offset per clock_id
+ *
+ * See also struct vdso_time_data for basic access and ordering information as
+ * struct vdso_clock is used there.
+ *
+ * @basetime is used to store the base time for the system wide time getter
+ * VVAR page.
+ *
+ * @offset is used by the special time namespace VVAR pages which are
+ * installed instead of the real VVAR page. These namespace pages must set
+ * @seq to 1 and @clock_mode to VDSO_CLOCKMODE_TIMENS to force the code into
+ * the time namespace slow path. The namespace aware functions retrieve the
+ * real system wide VVAR page, read host time and add the per clock offset.
+ * For clocks which are not affected by time namespace adjustment the
+ * offset must be zero.
+ */
+struct vdso_clock {
+	u32	seq;
+
+	s32	clock_mode;
+	u64	cycle_last;
+#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
+	u64	max_cycles;
+#endif
+	u64	mask;
+	u32	mult;
+	u32	shift;
+
+	union {
+		struct vdso_timestamp	basetime[VDSO_BASES];
+		struct timens_offset	offset[VDSO_BASES];
+	};
+};
+
+/**
+ * struct vdso_time_data - vdso datapage representation
+ * @arch_data:		architecture specific data (optional, defaults
+ *			to an empty struct)
+ * @clock_data:		clocksource related data (array)
+ * @aux_clock_data:	auxiliary clocksource related data (array)
+ * @tz_minuteswest:	minutes west of Greenwich
+ * @tz_dsttime:		type of DST correction
+ * @hrtimer_res:	hrtimer resolution
+ * @__unused:		unused
+ *
+ * vdso_time_data will be accessed by 64 bit and compat code at the same time
+ * so we should be careful before modifying this structure.
+ *
+ * The ordering of the struct members is optimized to have fast access to the
+ * often required struct members which are related to CLOCK_REALTIME and
+ * CLOCK_MONOTONIC. This information is stored in the first cache lines.
+ */
+struct vdso_time_data {
+	struct arch_vdso_time_data	arch_data;
+
+	struct vdso_clock		clock_data[CS_BASES];
+	struct vdso_clock		aux_clock_data[MAX_AUX_CLOCKS];
+
+	s32				tz_minuteswest;
+	s32				tz_dsttime;
+	u32				hrtimer_res;
+	u32				__unused;
+} ____cacheline_aligned;
+
+/**
+ * struct vdso_rng_data - vdso RNG state information
+ * @generation:	counter representing the number of RNG reseeds
+ * @is_ready:	boolean signaling whether the RNG is initialized
+ */
+struct vdso_rng_data {
+	u64	generation;
+	u8	is_ready;
+};
+
+/*
+ * We use the hidden visibility to prevent the compiler from generating a GOT
+ * relocation. Not only is going through a GOT useless (the entry couldn't and
+ * must not be overridden by another library), it does not even work: the linker
+ * cannot generate an absolute address to the data page.
+ *
+ * With the hidden visibility, the compiler simply generates a PC-relative
+ * relocation, and this is what we need.
+ */
+extern struct vdso_time_data vdso_u_time_data __attribute__((visibility("hidden")));
+extern struct vdso_rng_data vdso_u_rng_data __attribute__((visibility("hidden")));
+extern struct vdso_arch_data vdso_u_arch_data __attribute__((visibility("hidden")));
+
+extern struct vdso_time_data *vdso_k_time_data;
+extern struct vdso_rng_data *vdso_k_rng_data;
+extern struct vdso_arch_data *vdso_k_arch_data;
+
+#define VDSO_ARCH_DATA_SIZE	ALIGN(sizeof(struct vdso_arch_data), PAGE_SIZE)
+#define VDSO_ARCH_DATA_PAGES	(VDSO_ARCH_DATA_SIZE >> PAGE_SHIFT)
+
+enum vdso_pages {
+	VDSO_TIME_PAGE_OFFSET,
+	VDSO_TIMENS_PAGE_OFFSET,
+	VDSO_RNG_PAGE_OFFSET,
+	VDSO_ARCH_PAGES_START,
+	VDSO_ARCH_PAGES_END = VDSO_ARCH_PAGES_START + VDSO_ARCH_DATA_PAGES - 1,
+	VDSO_NR_PAGES
+};
+
+/*
+ * The generic vDSO implementation requires that gettimeofday.h
+ * provides:
+ * - __arch_get_hw_counter(): to get the hw counter based on the
+ *   clock_mode.
+ * - gettimeofday_fallback(): fallback for gettimeofday.
+ * - clock_gettime_fallback(): fallback for clock_gettime.
+ * - clock_getres_fallback(): fallback for clock_getres.
+ */
+#include <asm/vdso/gettimeofday.h>
+
+#else /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_VDSO_GETRANDOM
+#define	__vdso_u_rng_data	PROVIDE(vdso_u_rng_data = vdso_u_data + 2 * PAGE_SIZE);
+#else
+#define	__vdso_u_rng_data
+#endif
+
+#ifdef CONFIG_ARCH_HAS_VDSO_ARCH_DATA
+#define	__vdso_u_arch_data	PROVIDE(vdso_u_arch_data = vdso_u_data + 3 * PAGE_SIZE);
+#else
+#define	__vdso_u_arch_data
+#endif
+
+#define VDSO_VVAR_SYMS						\
+	PROVIDE(vdso_u_data = . - __VDSO_PAGES * PAGE_SIZE);	\
+	PROVIDE(vdso_u_time_data = vdso_u_data);		\
+	__vdso_u_rng_data					\
+	__vdso_u_arch_data					\
+
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __VDSO_DATAPAGE_H */
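Illustrative note (not part of the patch): the VDSO_HRES/VDSO_COARSE/VDSO_RAW masks are consumed by treating the clockid as a bit index and selecting one of the vdso_clock entries in clock_data[]. A rough sketch of that selection follows; the helper name is made up for the example.

    /* Sketch only: classify a clockid and pick the vdso_clock holding its data. */
    static inline const struct vdso_clock *
    example_select_clock(const struct vdso_time_data *vd, clockid_t clock)
    {
            u32 msk = 1U << clock;          /* clockids are small integers */

            if (msk & (VDSO_HRES | VDSO_COARSE))
                    return &vd->clock_data[CS_HRES_COARSE];
            if (msk & VDSO_RAW)
                    return &vd->clock_data[CS_RAW];
            return NULL;                    /* not handled, fall back to the syscall */
    }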
diff --git a/include/vdso/getrandom.h b/include/vdso/getrandom.h
new file mode 100644
index 000000000000..6ca4d6de9e46
--- /dev/null
+++ b/include/vdso/getrandom.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _VDSO_GETRANDOM_H
+#define _VDSO_GETRANDOM_H
+
+#include <linux/types.h>
+
+#define CHACHA_KEY_SIZE		32
+#define CHACHA_BLOCK_SIZE	64
+
+/**
+ * struct vgetrandom_state - State used by vDSO getrandom().
+ *
+ * @batch:	One and a half ChaCha20 blocks of buffered RNG output.
+ *
+ * @key:	Key to be used for generating next batch.
+ *
+ * @batch_key:	Union of the prior two members, which is exactly two full
+ *		ChaCha20 blocks in size, so that @batch and @key can be filled
+ *		together.
+ *
+ * @generation:	Snapshot of @rng_info->generation in the vDSO data page at
+ *		the time @key was generated.
+ *
+ * @pos:	Offset into @batch of the next available random byte.
+ *
+ * @in_use:	Reentrancy guard for reusing a state within the same thread
+ *		due to signal handlers.
+ */
+struct vgetrandom_state {
+	union {
+		struct {
+			u8	batch[CHACHA_BLOCK_SIZE * 3 / 2];
+			u32	key[CHACHA_KEY_SIZE / sizeof(u32)];
+		};
+		u8	batch_key[CHACHA_BLOCK_SIZE * 2];
+	};
+	u64	generation;
+	u8	pos;
+	bool	in_use;
+};
+
+/**
+ * __arch_chacha20_blocks_nostack - Generate ChaCha20 stream without using the stack.
+ * @dst_bytes:	Destination buffer to hold @nblocks * 64 bytes of output.
+ * @key:	32-byte input key.
+ * @counter:	8-byte counter, read on input and updated on return.
+ * @nblocks:	Number of blocks to generate.
+ *
+ * Generates a given positive number of blocks of ChaCha20 output with nonce=0, and does not write
+ * to any stack or memory outside of the parameters passed to it, in order to mitigate stack data
+ * leaking into forked child processes.
+ */
+extern void __arch_chacha20_blocks_nostack(u8 *dst_bytes, const u32 *key, u32 *counter, size_t nblocks);
+
+/**
+ * __vdso_getrandom - Architecture-specific vDSO implementation of getrandom() syscall.
+ * @buffer:		Passed to __cvdso_getrandom().
+ * @len:		Passed to __cvdso_getrandom().
+ * @flags:		Passed to __cvdso_getrandom().
+ * @opaque_state:	Passed to __cvdso_getrandom().
+ * @opaque_len:		Passed to __cvdso_getrandom().
+ *
+ * This function is implemented by making a single call to __cvdso_getrandom(), whose
+ * documentation may be consulted for more information.
+ *
+ * Returns:	The return value of __cvdso_getrandom().
+ */
+extern ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len);
+
+#endif /* _VDSO_GETRANDOM_H */
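Illustrative note (not part of the patch): the union above works because @batch plus @key fill @batch_key exactly. A hedged compile-time check makes that arithmetic explicit (96 + 32 == 128 bytes, i.e. two ChaCha20 blocks).

    /* Sketch only: the aliased members must cover batch_key exactly. */
    _Static_assert(CHACHA_BLOCK_SIZE * 3 / 2 + CHACHA_KEY_SIZE == CHACHA_BLOCK_SIZE * 2,
                   "batch + key must fill batch_key exactly");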
diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h
new file mode 100644
index 000000000000..9ac161866653
--- /dev/null
+++ b/include/vdso/gettime.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VDSO_GETTIME_H
+#define _VDSO_GETTIME_H
+
+#include <linux/types.h>
+
+struct __kernel_timespec;
+struct __kernel_old_timeval;
+struct timezone;
+
+#if !defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+struct old_timespec32;
+int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res);
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts);
+#else
+int __vdso_clock_getres(clockid_t clock, struct __kernel_timespec *res);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+#endif
+
+__kernel_old_time_t __vdso_time(__kernel_old_time_t *t);
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+
+#endif
diff --git a/include/vdso/helpers.h b/include/vdso/helpers.h
new file mode 100644
index 000000000000..1a5ee9d9052c
--- /dev/null
+++ b/include/vdso/helpers.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_HELPERS_H
+#define __VDSO_HELPERS_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <vdso/datapage.h>
+
+static __always_inline u32 vdso_read_begin(const struct vdso_clock *vc)
+{
+	u32 seq;
+
+	while (unlikely((seq = READ_ONCE(vc->seq)) & 1))
+		cpu_relax();
+
+	smp_rmb();
+	return seq;
+}
+
+static __always_inline u32 vdso_read_retry(const struct vdso_clock *vc,
+					   u32 start)
+{
+	u32 seq;
+
+	smp_rmb();
+	seq = READ_ONCE(vc->seq);
+	return seq != start;
+}
+
+static __always_inline void vdso_write_seq_begin(struct vdso_clock *vc)
+{
+	/*
+	 * WRITE_ONCE() is required otherwise the compiler can validly tear
	 * updates to vc->seq and it is possible that the value seen by the
+	 * reader is inconsistent.
+	 */
+	WRITE_ONCE(vc->seq, vc->seq + 1);
+}
+
+static __always_inline void vdso_write_seq_end(struct vdso_clock *vc)
+{
+	/*
+	 * WRITE_ONCE() is required otherwise the compiler can validly tear
+	 * updates to vc->seq and it is possible that the value seen by the
+	 * reader is inconsistent.
+	 */
+	WRITE_ONCE(vc->seq, vc->seq + 1);
+}
+
+static __always_inline void vdso_write_begin_clock(struct vdso_clock *vc)
+{
+	vdso_write_seq_begin(vc);
+	/* Ensure the sequence invalidation is visible before data is modified */
+	smp_wmb();
+}
+
+static __always_inline void vdso_write_end_clock(struct vdso_clock *vc)
+{
+	/* Ensure the data update is visible before the sequence is set valid again */
+	smp_wmb();
+	vdso_write_seq_end(vc);
+}
+
+static __always_inline void vdso_write_begin(struct vdso_time_data *vd)
+{
+	struct vdso_clock *vc = vd->clock_data;
+
+	vdso_write_seq_begin(&vc[CS_HRES_COARSE]);
+	vdso_write_seq_begin(&vc[CS_RAW]);
+	/* Ensure the sequence invalidation is visible before data is modified */
+	smp_wmb();
+}
+
+static __always_inline void vdso_write_end(struct vdso_time_data *vd)
+{
+	struct vdso_clock *vc = vd->clock_data;
+
+	/* Ensure the data update is visible before the sequence is set valid again */
+	smp_wmb();
+	vdso_write_seq_end(&vc[CS_HRES_COARSE]);
+	vdso_write_seq_end(&vc[CS_RAW]);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __VDSO_HELPERS_H */
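Illustrative note (not part of the patch): readers pair vdso_read_begin() and vdso_read_retry() around every access to the clock data and retry until the sequence count is stable. A minimal sketch of that loop, with a made-up helper name:

    /* Sketch only: take a consistent snapshot of one basetime entry. */
    static inline void example_read_clock(const struct vdso_clock *vc,
                                          struct vdso_timestamp *ts)
    {
            u32 seq;

            do {
                    seq = vdso_read_begin(vc);      /* spins while seq is odd */
                    *ts = vc->basetime[CLOCK_MONOTONIC];
            } while (unlikely(vdso_read_retry(vc, seq)));
    }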
diff --git a/include/vdso/jiffies.h b/include/vdso/jiffies.h
new file mode 100644
index 000000000000..8ca04a141412
--- /dev/null
+++ b/include/vdso/jiffies.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_JIFFIES_H
+#define __VDSO_JIFFIES_H
+
+#include <asm/param.h>			/* for HZ */
+#include <vdso/time64.h>
+
+/* TICK_NSEC is the time between ticks in nsec */
+#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
+
+#endif /* __VDSO_JIFFIES_H */
diff --git a/include/vdso/ktime.h b/include/vdso/ktime.h
new file mode 100644
index 000000000000..a0fd07239e0e
--- /dev/null
+++ b/include/vdso/ktime.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_KTIME_H
+#define __VDSO_KTIME_H
+
+#include <vdso/jiffies.h>
+
+/*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * this resolution values.
+ */
+#define LOW_RES_NSEC		TICK_NSEC
+#define KTIME_LOW_RES		(LOW_RES_NSEC)
+
+#endif /* __VDSO_KTIME_H */
diff --git a/include/vdso/limits.h b/include/vdso/limits.h
new file mode 100644
index 000000000000..0197888ad0e0
--- /dev/null
+++ b/include/vdso/limits.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_LIMITS_H
+#define __VDSO_LIMITS_H
+
+#define USHRT_MAX	((unsigned short)~0U)
+#define SHRT_MAX	((short)(USHRT_MAX >> 1))
+#define SHRT_MIN	((short)(-SHRT_MAX - 1))
+#define INT_MAX		((int)(~0U >> 1))
+#define INT_MIN		(-INT_MAX - 1)
+#define UINT_MAX	(~0U)
+#define LONG_MAX	((long)(~0UL >> 1))
+#define LONG_MIN	(-LONG_MAX - 1)
+#define ULONG_MAX	(~0UL)
+#define LLONG_MAX	((long long)(~0ULL >> 1))
+#define LLONG_MIN	(-LLONG_MAX - 1)
+#define ULLONG_MAX	(~0ULL)
+#define UINTPTR_MAX	ULONG_MAX
+
+#endif /* __VDSO_LIMITS_H */
diff --git a/include/vdso/math64.h b/include/vdso/math64.h
new file mode 100644
index 000000000000..22ae212f8b28
--- /dev/null
+++ b/include/vdso/math64.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_MATH64_H
+#define __VDSO_MATH64_H
+
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	u32 ret = 0;
+
+	while (dividend >= divisor) {
+		/* The following asm() prevents the compiler from
+		   optimising this loop into a modulo operation.  */
+		asm("" : "+rm"(dividend));
+
+		dividend -= divisor;
+		ret++;
+	}
+
+	*remainder = dividend;
+
+	return ret;
+}
+
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_add_u64_shr
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#else
+
+#ifndef mul_u64_u32_add_u64_shr
+#ifndef mul_u32_u32
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return (u64)a * b;
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+	u32 ah = a >> 32, al = a;
+	bool ovf;
+	u64 ret;
+
+	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
+	ret >>= shift;
+	if (ovf && shift)
+		ret += 1ULL << (64 - shift);
+	if (ah)
+		ret += mul_u32_u32(ah, mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#endif
+
+#endif /* __VDSO_MATH64_H */
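Illustrative note (not part of the patch): mul_u64_u32_add_u64_shr() computes (a * mul + b) >> shift without overflowing 64 bits, which is the shape of the cycles-to-nanoseconds conversion in the generic vDSO time getters. A hedged sketch, with made-up names:

    /* Sketch only: convert a counter delta to nanoseconds using the
     * per-clocksource mult/shift pair from struct vdso_clock. */
    static inline u64 example_cycles_to_ns(const struct vdso_clock *vc,
                                            u64 cycles, u64 base_ns_shifted)
    {
            u64 delta = (cycles - vc->cycle_last) & vc->mask;

            /* (delta * mult + base) >> shift, overflow-safe */
            return mul_u64_u32_add_u64_shr(delta, vc->mult, base_ns_shifted, vc->shift);
    }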
diff --git a/include/vdso/page.h b/include/vdso/page.h
new file mode 100644
index 000000000000..bc47186c07fc
--- /dev/null
+++ b/include/vdso/page.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_PAGE_H
+#define __VDSO_PAGE_H
+
+#include <uapi/linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size.
+ *
+ * Note: This definition is required because PAGE_SHIFT is used
+ * in several places throughout the codebase.
+ */
+#define PAGE_SHIFT	CONFIG_PAGE_SHIFT
+
+#define PAGE_SIZE	(_AC(1,UL) << CONFIG_PAGE_SHIFT)
+
+#if !defined(CONFIG_64BIT)
+/*
+ * Applies only to 32-bit architectures.
+ *
+ * Subtle: (1 << CONFIG_PAGE_SHIFT) is an int, not an unsigned long.
+ * So if we assign PAGE_MASK to a larger type it gets extended the
+ * way we want (i.e. with 1s in the high bits) while masking a
+ * 64-bit value such as phys_addr_t.
+ */
+#define PAGE_MASK	(~((1 << CONFIG_PAGE_SHIFT) - 1))
+#else
+#define PAGE_MASK	(~(PAGE_SIZE - 1))
+#endif
+
+#endif	/* __VDSO_PAGE_H */
diff --git a/include/vdso/processor.h b/include/vdso/processor.h
new file mode 100644
index 000000000000..fbe8265ea3c4
--- /dev/null
+++ b/include/vdso/processor.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __VDSO_PROCESSOR_H
+#define __VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/vdso/processor.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __VDSO_PROCESSOR_H */
diff --git a/include/vdso/time.h b/include/vdso/time.h
new file mode 100644
index 000000000000..739f53cd2949
--- /dev/null
+++ b/include/vdso/time.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_TIME_H
+#define __VDSO_TIME_H
+
+#include <uapi/linux/types.h>
+
+struct timens_offset {
+	s64	sec;
+	u64	nsec;
+};
+
+#endif /* __VDSO_TIME_H */
diff --git a/include/vdso/time32.h b/include/vdso/time32.h
new file mode 100644
index 000000000000..fdf56f932f67
--- /dev/null
+++ b/include/vdso/time32.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_TIME32_H
+#define __VDSO_TIME32_H
+
+typedef s32		old_time32_t;
+
+struct old_timespec32 {
+	old_time32_t	tv_sec;
+	s32		tv_nsec;
+};
+
+struct old_timeval32 {
+	old_time32_t	tv_sec;
+	s32		tv_usec;
+};
+
+#endif /* __VDSO_TIME32_H */
diff --git a/include/vdso/time64.h b/include/vdso/time64.h
new file mode 100644
index 000000000000..b40cfa2aa33c
--- /dev/null
+++ b/include/vdso/time64.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_TIME64_H
+#define __VDSO_TIME64_H
+
+/* Parameters used to convert the timespec values: */
+#define MSEC_PER_SEC	1000L
+#define USEC_PER_MSEC	1000L
+#define NSEC_PER_USEC	1000L
+#define NSEC_PER_MSEC	1000000L
+#define USEC_PER_SEC	1000000L
+#define NSEC_PER_SEC	1000000000L
+#define PSEC_PER_SEC	1000000000000LL
+#define FSEC_PER_SEC	1000000000000000LL
+
+#endif /* __VDSO_TIME64_H */
diff --git a/include/vdso/unaligned.h b/include/vdso/unaligned.h
new file mode 100644
index 000000000000..ff0c06b6513e
--- /dev/null
+++ b/include/vdso/unaligned.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_UNALIGNED_H
+#define __VDSO_UNALIGNED_H
+
+#define __get_unaligned_t(type, ptr) ({						\
+	const struct { type x; } __packed * __get_pptr = (typeof(__get_pptr))(ptr); \
+	__get_pptr->x;								\
+})
+
+#define __put_unaligned_t(type, val, ptr) do {					\
+	struct { type x; } __packed * __put_pptr = (typeof(__put_pptr))(ptr);	\
+	__put_pptr->x = (val);							\
+} while (0)
+
+#endif /* __VDSO_UNALIGNED_H */
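Illustrative note (not part of the patch): __get_unaligned_t() wraps the access in a packed one-member struct so the compiler emits a load that is safe at any address. A small hedged usage sketch, with a made-up helper name:

    /* Sketch only: read a native-endian u32 from a possibly misaligned buffer. */
    static inline u32 example_read_u32(const u8 *buf)
    {
            return __get_unaligned_t(u32, buf);
    }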
diff --git a/include/vdso/vsyscall.h b/include/vdso/vsyscall.h
new file mode 100644
index 000000000000..b0fdc9c6bf43
--- /dev/null
+++ b/include/vdso/vsyscall.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_VSYSCALL_H
+#define __VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/vdso/vsyscall.h>
+
+unsigned long vdso_update_begin(void);
+void vdso_update_end(unsigned long flags);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __VDSO_VSYSCALL_H */
