Diffstat (limited to 'drivers/char/random.c')
| -rw-r--r-- | drivers/char/random.c | 149 |
1 file changed, 88 insertions, 61 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ce3ccd172cc8..bab03c7c4194 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
 /*
- * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright (C) 2017-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
  *
@@ -56,6 +56,11 @@
 #include <linux/sched/isolation.h>
 #include <crypto/chacha.h>
 #include <crypto/blake2s.h>
+#ifdef CONFIG_VDSO_GETRANDOM
+#include <vdso/getrandom.h>
+#include <vdso/datapage.h>
+#include <vdso/vsyscall.h>
+#endif
 #include <asm/archrandom.h>
 #include <asm/processor.h>
 #include <asm/irq.h>
@@ -254,8 +259,8 @@ static void crng_reseed(struct work_struct *work)
 	u8 key[CHACHA_KEY_SIZE];
 
 	/* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
-	if (likely(system_unbound_wq))
-		queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
+	if (likely(system_dfl_wq))
+		queue_delayed_work(system_dfl_wq, &next_reseed, crng_reseed_interval());
 
 	extract_entropy(key, sizeof(key));
 
@@ -271,6 +276,22 @@ static void crng_reseed(struct work_struct *work)
 	if (next_gen == ULONG_MAX)
 		++next_gen;
 	WRITE_ONCE(base_crng.generation, next_gen);
+#ifdef CONFIG_VDSO_GETRANDOM
+	/* base_crng.generation's invalid value is ULONG_MAX, while
+	 * vdso_k_rng_data->generation's invalid value is 0, so add one to the
+	 * former to arrive at the latter. Use smp_store_release so that this
+	 * is ordered with the write above to base_crng.generation. Pairs with
+	 * the smp_rmb() before the syscall in the vDSO code.
+	 *
+	 * Cast to unsigned long for 32-bit architectures, since atomic 64-bit
+	 * operations are not supported on those architectures. This is safe
+	 * because base_crng.generation is a 32-bit value. On big-endian
+	 * architectures it will be stored in the upper 32 bits, but that's okay
+	 * because the vDSO side only checks whether the value changed, without
+	 * actually using or interpreting the value.
+	 */
+	smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1);
+#endif
 	if (!static_branch_likely(&crng_is_ready))
 		crng_init = CRNG_READY;
 	spin_unlock_irqrestore(&base_crng.lock, flags);
@@ -288,11 +309,11 @@
  * key value, at index 4, so the state should always be zeroed out
  * immediately after using in order to maintain forward secrecy.
  * If the state cannot be erased in a timely manner, then it is
- * safer to set the random_data parameter to &chacha_state[4] so
- * that this function overwrites it before returning.
+ * safer to set the random_data parameter to &chacha_state->x[4]
+ * so that this function overwrites it before returning.
  */
 static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
-				  u32 chacha_state[CHACHA_STATE_WORDS],
+				  struct chacha_state *chacha_state,
 				  u8 *random_data, size_t random_data_len)
 {
 	u8 first_block[CHACHA_BLOCK_SIZE];
@@ -300,8 +321,8 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
 	BUG_ON(random_data_len > 32);
 
 	chacha_init_consts(chacha_state);
-	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
-	memset(&chacha_state[12], 0, sizeof(u32) * 4);
+	memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE);
+	memset(&chacha_state->x[12], 0, sizeof(u32) * 4);
 	chacha20_block(chacha_state, first_block);
 
 	memcpy(key, first_block, CHACHA_KEY_SIZE);
@@ -314,7 +335,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
  * random data. It also returns up to 32 bytes on its own of random data
  * that may be used; random_data_len may not be greater than 32.
  */
-static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+static void crng_make_state(struct chacha_state *chacha_state,
 			    u8 *random_data, size_t random_data_len)
 {
 	unsigned long flags;
@@ -374,7 +395,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
 
 static void _get_random_bytes(void *buf, size_t len)
 {
-	u32 chacha_state[CHACHA_STATE_WORDS];
+	struct chacha_state chacha_state;
 	u8 tmp[CHACHA_BLOCK_SIZE];
 	size_t first_block_len;
 
@@ -382,31 +403,31 @@ static void _get_random_bytes(void *buf, size_t len)
 		return;
 
 	first_block_len = min_t(size_t, 32, len);
-	crng_make_state(chacha_state, buf, first_block_len);
+	crng_make_state(&chacha_state, buf, first_block_len);
 	len -= first_block_len;
 	buf += first_block_len;
 
 	while (len) {
 		if (len < CHACHA_BLOCK_SIZE) {
-			chacha20_block(chacha_state, tmp);
+			chacha20_block(&chacha_state, tmp);
 			memcpy(buf, tmp, len);
 			memzero_explicit(tmp, sizeof(tmp));
 			break;
 		}
 
-		chacha20_block(chacha_state, buf);
-		if (unlikely(chacha_state[12] == 0))
-			++chacha_state[13];
+		chacha20_block(&chacha_state, buf);
+		if (unlikely(chacha_state.x[12] == 0))
+			++chacha_state.x[13];
 		len -= CHACHA_BLOCK_SIZE;
 		buf += CHACHA_BLOCK_SIZE;
 	}
 
-	memzero_explicit(chacha_state, sizeof(chacha_state));
+	chacha_zeroize_state(&chacha_state);
 }
 
 /*
  * This returns random bytes in arbitrary quantities. The quality of the
- * random bytes is good as /dev/urandom. In order to ensure that the
+ * random bytes is as good as /dev/urandom. In order to ensure that the
  * randomness provided by this function is okay, the function
  * wait_for_random_bytes() should be called and return 0 at least once
  * at any point prior.
@@ -420,7 +441,7 @@ EXPORT_SYMBOL(get_random_bytes);
 
 static ssize_t get_random_bytes_user(struct iov_iter *iter)
 {
-	u32 chacha_state[CHACHA_STATE_WORDS];
+	struct chacha_state chacha_state;
 	u8 block[CHACHA_BLOCK_SIZE];
 	size_t ret = 0, copied;
 
@@ -432,21 +453,22 @@
 	 * bytes, in case userspace causes copy_to_iter() below to sleep
 	 * forever, so that we still retain forward secrecy in that case.
 	 */
-	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+	crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4],
+			CHACHA_KEY_SIZE);
 	/*
 	 * However, if we're doing a read of len <= 32, we don't need to
 	 * use chacha_state after, so we can simply return those bytes to
 	 * the user directly.
 	 */
 	if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
-		ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+		ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter);
 		goto out_zero_chacha;
 	}
 
 	for (;;) {
-		chacha20_block(chacha_state, block);
-		if (unlikely(chacha_state[12] == 0))
-			++chacha_state[13];
+		chacha20_block(&chacha_state, block);
+		if (unlikely(chacha_state.x[12] == 0))
+			++chacha_state.x[13];
 
 		copied = copy_to_iter(block, sizeof(block), iter);
 		ret += copied;
@@ -463,13 +485,13 @@
 
 	memzero_explicit(block, sizeof(block));
 out_zero_chacha:
-	memzero_explicit(chacha_state, sizeof(chacha_state));
+	chacha_zeroize_state(&chacha_state);
 	return ret ? ret : -EFAULT;
 }
 
 /*
  * Batched entropy returns random integers. The quality of the random
- * number is good as /dev/urandom. In order to ensure that the randomness
+ * number is as good as /dev/urandom. In order to ensure that the randomness
  * provided by this function is okay, the function wait_for_random_bytes()
  * should be called and return 0 at least once at any point prior.
  */
@@ -614,7 +636,7 @@
 };
 
 static struct {
-	struct blake2s_state hash;
+	struct blake2s_ctx hash;
 	spinlock_t lock;
 	unsigned int init_bits;
 } input_pool = {
@@ -679,7 +701,7 @@ static void extract_entropy(void *buf, size_t len)
 
 	/* next_key = HASHPRF(seed, RDSEED || 0) */
 	block.counter = 0;
-	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+	blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), next_key, sizeof(next_key));
 	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
 
 	spin_unlock_irqrestore(&input_pool.lock, flags);
@@ -689,7 +711,7 @@
 		i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
 		/* output = HASHPRF(seed, RDSEED || ++counter) */
 		++block.counter;
-		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+		blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), buf, i);
 		len -= i;
 		buf += i;
 	}
@@ -702,9 +724,10 @@
 
 static void __cold _credit_init_bits(size_t bits)
 {
-	static struct execute_work set_ready;
+	static DECLARE_WORK(set_ready, crng_set_ready);
 	unsigned int new, orig, add;
 	unsigned long flags;
+	int m;
 
 	if (!bits)
 		return;
@@ -718,15 +741,18 @@
 	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
 		crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
-		if (static_key_initialized)
-			execute_in_process_context(crng_set_ready, &set_ready);
+		if (system_dfl_wq)
+			queue_work(system_dfl_wq, &set_ready);
 		atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
+#ifdef CONFIG_VDSO_GETRANDOM
+		WRITE_ONCE(vdso_k_rng_data->is_ready, true);
+#endif
 		wake_up_interruptible(&crng_init_wait);
 		kill_fasync(&fasync, SIGIO, POLL_IN);
 		pr_notice("crng init done\n");
-		if (urandom_warning.missed)
-			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
-				  urandom_warning.missed);
+		m = ratelimit_state_get_miss(&urandom_warning);
+		if (m)
+			pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m);
 	} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
 		spin_lock_irqsave(&base_crng.lock, flags);
 		/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed().
 		 */
@@ -768,7 +794,7 @@
  *
  * add_bootloader_randomness() is called by bootloader drivers, such as EFI
  * and device tree, and credits its input depending on whether or not the
- * command line option 'random.trust_bootloader'.
+ * command line option 'random.trust_bootloader' is set.
  *
 * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
 * representing the current instance of a VM to the pool, without crediting,
@@ -889,9 +915,8 @@ void __init random_init(void)
 	add_latent_entropy();
 
 	/*
-	 * If we were initialized by the cpu or bootloader before jump labels
-	 * are initialized, then we should enable the static branch here, where
-	 * it's guaranteed that jump labels have been initialized.
+	 * If we were initialized by the cpu or bootloader before workqueues
+	 * are initialized, then we should enable the static branch here.
 	 */
 	if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
 		crng_set_ready(NULL);
@@ -1270,6 +1295,7 @@ static void __cold try_to_generate_entropy(void)
 	struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
 	unsigned int i, num_different = 0;
 	unsigned long last = random_get_entropy();
+	cpumask_var_t timer_cpus;
 	int cpu = -1;
 
 	for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
@@ -1284,13 +1310,15 @@
 	atomic_set(&stack->samples, 0);
 	timer_setup_on_stack(&stack->timer, entropy_timer, 0);
 
+	if (!alloc_cpumask_var(&timer_cpus, GFP_KERNEL))
+		goto out;
+
 	while (!crng_ready() && !signal_pending(current)) {
 		/*
 		 * Check !timer_pending() and then ensure that any previous callback has finished
-		 * executing by checking try_to_del_timer_sync(), before queueing the next one.
+		 * executing by checking timer_delete_sync_try(), before queueing the next one.
 		 */
-		if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
-			struct cpumask timer_cpus;
+		if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
 			unsigned int num_cpus;
 
 			/*
@@ -1300,19 +1328,19 @@
 			preempt_disable();
 			/* Only schedule callbacks on timer CPUs that are online. */
-			cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
-			num_cpus = cpumask_weight(&timer_cpus);
+			cpumask_and(timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+			num_cpus = cpumask_weight(timer_cpus);
 			/* In very bizarre case of misconfiguration, fallback to all online. */
 			if (unlikely(num_cpus == 0)) {
-				timer_cpus = *cpu_online_mask;
-				num_cpus = cpumask_weight(&timer_cpus);
+				*timer_cpus = *cpu_online_mask;
+				num_cpus = cpumask_weight(timer_cpus);
 			}
 
 			/* Basic CPU round-robin, which avoids the current CPU. */
 			do {
-				cpu = cpumask_next(cpu, &timer_cpus);
-				if (cpu == nr_cpumask_bits)
-					cpu = cpumask_first(&timer_cpus);
+				cpu = cpumask_next(cpu, timer_cpus);
+				if (cpu >= nr_cpu_ids)
+					cpu = cpumask_first(timer_cpus);
 			} while (cpu == smp_processor_id() && num_cpus > 1);
 
 			/* Expiring the timer at `jiffies` means it's the next tick.
 			 */
@@ -1328,8 +1356,10 @@
 	}
 
 	mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
 
-	del_timer_sync(&stack->timer);
-	destroy_timer_on_stack(&stack->timer);
+	free_cpumask_var(timer_cpus);
+out:
+	timer_delete_sync(&stack->timer);
+	timer_destroy_on_stack(&stack->timer);
 }
 
@@ -1364,7 +1394,6 @@ static void __cold try_to_generate_entropy(void)
 SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
 {
 	struct iov_iter iter;
-	struct iovec iov;
 	int ret;
 
 	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
@@ -1385,7 +1414,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags
 			return ret;
 	}
 
-	ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
+	ret = import_ubuf(ITER_DEST, ubuf, len, &iter);
 	if (unlikely(ret))
 		return ret;
 	return get_random_bytes_user(&iter);
@@ -1443,7 +1472,7 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
 
 	if (!crng_ready()) {
 		if (!ratelimit_disable && maxwarn <= 0)
-			++urandom_warning.missed;
+			ratelimit_state_inc_miss(&urandom_warning);
 		else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
 			--maxwarn;
 			pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
@@ -1491,7 +1520,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 		return 0;
 	case RNDADDENTROPY: {
 		struct iov_iter iter;
-		struct iovec iov;
 		ssize_t ret;
 		int len;
 
@@ -1503,7 +1531,7 @@
 			return -EINVAL;
 		if (get_user(len, p++))
 			return -EFAULT;
-		ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter);
+		ret = import_ubuf(ITER_SOURCE, p, len, &iter);
 		if (unlikely(ret))
 			return ret;
 		ret = write_pool_user(&iter);
@@ -1546,7 +1574,7 @@ const struct file_operations random_fops = {
 	.compat_ioctl = compat_ptr_ioctl,
 	.fasync = random_fasync,
 	.llseek = noop_llseek,
-	.splice_read = generic_file_splice_read,
+	.splice_read = copy_splice_read,
 	.splice_write = iter_file_splice_write,
 };
 
@@ -1557,7 +1585,7 @@ const struct file_operations urandom_fops = {
 	.compat_ioctl = compat_ptr_ioctl,
 	.fasync = random_fasync,
 	.llseek = noop_llseek,
-	.splice_read = generic_file_splice_read,
+	.splice_read = copy_splice_read,
 	.splice_write = iter_file_splice_write,
 };
 
@@ -1606,7 +1634,7 @@ static u8 sysctl_bootid[UUID_SIZE];
  * UUID. The difference is in whether table->data is NULL; if it is,
  * then a new UUID is generated and returned to the user.
  */
-static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
+static int proc_do_uuid(const struct ctl_table *table, int write, void *buf,
 			size_t *lenp, loff_t *ppos)
 {
 	u8 tmp_uuid[UUID_SIZE], *uuid;
@@ -1637,13 +1665,13 @@
 }
 
 /* The same as proc_dointvec, but writes don't change anything. */
-static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
+static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
 			    size_t *lenp, loff_t *ppos)
 {
 	return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
 }
 
-static struct ctl_table random_table[] = {
+static const struct ctl_table random_table[] = {
 	{
 		.procname	= "poolsize",
 		.data		= &sysctl_poolsize,
@@ -1683,7 +1711,6 @@
 		.mode		= 0444,
 		.proc_handler	= proc_do_uuid,
 	},
-	{ }
 };
 
 /*
