Diffstat (limited to 'include/linux/random.h')
-rw-r--r--   include/linux/random.h   244
1 file changed, 93 insertions, 151 deletions
diff --git a/include/linux/random.h b/include/linux/random.h
index 445a0ea4ff49..8a8064dc3970 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -1,56 +1,46 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/*
- * include/linux/random.h
- *
- * Include file for the random number generator.
- */
+
 #ifndef _LINUX_RANDOM_H
 #define _LINUX_RANDOM_H
 
+#include <linux/bug.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/once.h>
 
 #include <uapi/linux/random.h>
 
-struct random_ready_callback {
-	struct list_head list;
-	void (*func)(struct random_ready_callback *rdy);
-	struct module *owner;
-};
+struct notifier_block;
 
-extern void add_device_randomness(const void *, unsigned int);
+void add_device_randomness(const void *buf, size_t len);
+void __init add_bootloader_randomness(const void *buf, size_t len);
+void add_input_randomness(unsigned int type, unsigned int code,
+			  unsigned int value) __latent_entropy;
+void add_interrupt_randomness(int irq) __latent_entropy;
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
 
-#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
 static inline void add_latent_entropy(void)
 {
-	add_device_randomness((const void *)&latent_entropy,
-			      sizeof(latent_entropy));
-}
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
 #else
-static inline void add_latent_entropy(void) {}
+	add_device_randomness(NULL, 0);
 #endif
+}
 
-extern void add_input_randomness(unsigned int type, unsigned int code,
-				 unsigned int value) __latent_entropy;
-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
-
-extern void get_random_bytes(void *buf, int nbytes);
-extern int wait_for_random_bytes(void);
-extern bool rng_is_initialized(void);
-extern int add_random_ready_callback(struct random_ready_callback *rdy);
-extern void del_random_ready_callback(struct random_ready_callback *rdy);
-extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
-
-#ifndef MODULE
-extern const struct file_operations random_fops, urandom_fops;
+#if IS_ENABLED(CONFIG_VMGENID)
+void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+int register_random_vmfork_notifier(struct notifier_block *nb);
+int unregister_random_vmfork_notifier(struct notifier_block *nb);
+#else
+static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
+static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
 #endif
 
+void get_random_bytes(void *buf, size_t len);
+u8 get_random_u8(void);
+u16 get_random_u16(void);
 u32 get_random_u32(void);
 u64 get_random_u64(void);
-static inline unsigned int get_random_int(void)
-{
-	return get_random_u32();
-}
 static inline unsigned long get_random_long(void)
 {
 #if BITS_PER_LONG == 64
@@ -60,141 +50,93 @@ static inline unsigned long get_random_long(void)
 #endif
 }
 
+u32 __get_random_u32_below(u32 ceil);
+
 /*
- * On 64-bit architectures, protect against non-terminated C string overflows
- * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ * Returns a random integer in the interval [0, ceil), with uniform
+ * distribution, suitable for all uses. Fastest when ceil is a constant, but
+ * still fast for variable ceil as well.
  */
-#ifdef CONFIG_64BIT
-# ifdef __LITTLE_ENDIAN
-#  define CANARY_MASK 0xffffffffffffff00UL
-# else /* big endian, 64 bits: */
-#  define CANARY_MASK 0x00ffffffffffffffUL
-# endif
-#else /* 32 bits: */
-# define CANARY_MASK 0xffffffffUL
-#endif
-
-static inline unsigned long get_random_canary(void)
+static inline u32 get_random_u32_below(u32 ceil)
 {
-	unsigned long val = get_random_long();
-
-	return val & CANARY_MASK;
-}
-
-/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
- * Returns the result of the call to wait_for_random_bytes. */
-static inline int get_random_bytes_wait(void *buf, int nbytes)
-{
-	int ret = wait_for_random_bytes();
-	get_random_bytes(buf, nbytes);
-	return ret;
-}
-
-#define declare_get_random_var_wait(var) \
-	static inline int get_random_ ## var ## _wait(var *out) { \
-		int ret = wait_for_random_bytes(); \
-		if (unlikely(ret)) \
-			return ret; \
-		*out = get_random_ ## var(); \
-		return 0; \
+	if (!__builtin_constant_p(ceil))
+		return __get_random_u32_below(ceil);
+
+	/*
+	 * For the fast path, below, all operations on ceil are precomputed by
+	 * the compiler, so this incurs no overhead for checking pow2, doing
+	 * divisions, or branching based on integer size. The resultant
+	 * algorithm does traditional reciprocal multiplication (typically
+	 * optimized by the compiler into shifts and adds), rejecting samples
+	 * whose lower half would indicate a range indivisible by ceil.
+	 */
+	BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
+	if (ceil <= 1)
+		return 0;
+	for (;;) {
+		if (ceil <= 1U << 8) {
+			u32 mult = ceil * get_random_u8();
+			if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
+				return mult >> 8;
+		} else if (ceil <= 1U << 16) {
+			u32 mult = ceil * get_random_u16();
+			if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
+				return mult >> 16;
+		} else {
+			u64 mult = (u64)ceil * get_random_u32();
+			if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
+				return mult >> 32;
+		}
 	}
-declare_get_random_var_wait(u32)
-declare_get_random_var_wait(u64)
-declare_get_random_var_wait(int)
-declare_get_random_var_wait(long)
-#undef declare_get_random_var
-
-unsigned long randomize_page(unsigned long start, unsigned long range);
-
-u32 prandom_u32(void);
-void prandom_bytes(void *buf, size_t nbytes);
-void prandom_seed(u32 seed);
-void prandom_reseed_late(void);
-
-struct rnd_state {
-	__u32 s1, s2, s3, s4;
-};
-
-u32 prandom_u32_state(struct rnd_state *state);
-void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
-void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
-
-#define prandom_init_once(pcpu_state) \
-	DO_ONCE(prandom_seed_full_state, (pcpu_state))
-
-/**
- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
- * @ep_ro: right open interval endpoint
- *
- * Returns a pseudo-random number that is in interval [0, ep_ro). Note
- * that the result depends on PRNG being well distributed in [0, ~0U]
- * u32 space. Here we use maximally equidistributed combined Tausworthe
- * generator, that is, prandom_u32(). This is useful when requesting a
- * random index of an array containing ep_ro elements, for example.
- *
- * Returns: pseudo-random number in interval [0, ep_ro)
- */
-static inline u32 prandom_u32_max(u32 ep_ro)
-{
-	return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
 }
 
 /*
- * Handle minimum values for seeds
+ * Returns a random integer in the interval (floor, U32_MAX], with uniform
+ * distribution, suitable for all uses. Fastest when floor is a constant, but
+ * still fast for variable floor as well.
  */
-static inline u32 __seed(u32 x, u32 m)
+static inline u32 get_random_u32_above(u32 floor)
 {
-	return (x < m) ? x + m : x;
+	BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
+			 "get_random_u32_above() must take floor < U32_MAX");
+	return floor + 1 + get_random_u32_below(U32_MAX - floor);
 }
 
-/**
- * prandom_seed_state - set seed for prandom_u32_state().
- * @state: pointer to state structure to receive the seed.
- * @seed: arbitrary 64-bit value to use as a seed.
+/*
+ * Returns a random integer in the interval [floor, ceil], with uniform
+ * distribution, suitable for all uses. Fastest when floor and ceil are
+ * constant, but still fast for variable floor and ceil as well.
  */
-static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
 {
-	u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
-
-	state->s1 = __seed(i,   2U);
-	state->s2 = __seed(i,   8U);
-	state->s3 = __seed(i,  16U);
-	state->s4 = __seed(i, 128U);
+	BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
+			 (floor > ceil || ceil - floor == U32_MAX),
+			 "get_random_u32_inclusive() must take floor <= ceil");
+	return floor + get_random_u32_below(ceil - floor + 1);
 }
 
-#ifdef CONFIG_ARCH_RANDOM
-# include <asm/archrandom.h>
-#else
-static inline bool arch_get_random_long(unsigned long *v)
-{
-	return 0;
-}
-static inline bool arch_get_random_int(unsigned int *v)
-{
-	return 0;
-}
-static inline bool arch_has_random(void)
-{
-	return 0;
-}
-static inline bool arch_get_random_seed_long(unsigned long *v)
-{
-	return 0;
-}
-static inline bool arch_get_random_seed_int(unsigned int *v)
-{
-	return 0;
-}
-static inline bool arch_has_random_seed(void)
+void __init random_init_early(const char *command_line);
+void __init random_init(void);
+bool rng_is_initialized(void);
+int wait_for_random_bytes(void);
+int execute_with_initialized_rng(struct notifier_block *nb);
+
+/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
+ * Returns the result of the call to wait_for_random_bytes. */
+static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
-	return 0;
+	int ret = wait_for_random_bytes();
+	get_random_bytes(buf, nbytes);
+	return ret;
 }
+
+#ifdef CONFIG_SMP
+int random_prepare_cpu(unsigned int cpu);
+int random_online_cpu(unsigned int cpu);
 #endif
 
-/* Pseudo random number generator from numerical recipes. */
-static inline u32 next_pseudo_random32(u32 seed)
-{
-	return seed * 1664525 + 1013904223;
-}
+#ifndef MODULE
+extern const struct file_operations random_fops, urandom_fops;
+#endif
 
 #endif /* _LINUX_RANDOM_H */
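For reference, a minimal usage sketch of the bounded-integer helpers this diff introduces. The caller names below (pick_table_index, roll_d6, pick_above) are invented for illustration and are not part of the patch; only the get_random_u32_below()/get_random_u32_above()/get_random_u32_inclusive() calls and their interval semantics come from the header comments above.

#include <linux/random.h>

/* Uniform index into a table of n entries (n must be > 0): result in [0, n). */
static u32 pick_table_index(u32 n)
{
	return get_random_u32_below(n);
}

/* Six-sided die roll: result uniform in [1, 6]. */
static u32 roll_d6(void)
{
	return get_random_u32_inclusive(1, 6);
}

/* A value strictly greater than floor: result uniform in (floor, U32_MAX]. */
static u32 pick_above(u32 floor)
{
	return get_random_u32_above(floor);
}

Unlike the removed prandom_u32_max() (a plain multiply-and-shift) or an open-coded "get_random_u32() % n", get_random_u32_below() rejects and retries samples whose low half falls into the leftover part of the range, so the result stays exactly uniform even when the bound does not divide 2^32.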
