Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--  drivers/char/random.c | 62
1 file changed, 47 insertions(+), 15 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index bca4467e540f..d73a75cbe82d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -698,6 +698,25 @@ u32 get_random_u32(void)
}
EXPORT_SYMBOL(get_random_u32);
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+int random_prepare_cpu(unsigned int cpu)
+{
+ /*
+ * When the cpu comes back online, immediately invalidate both
+ * the per-cpu crng and all batches, so that we serve fresh
+ * randomness.
+ */
+ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
+ return 0;
+}
+#endif
+
/**
* randomize_page - Generate a random, page aligned address
* @start: The smallest acceptable address the caller will take.
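A note on the sentinel values in random_prepare_cpu() above: they work because
the consumers treat them as "stale, refill before use". No live crng generation
ever equals ULONG_MAX, and UINT_MAX always exceeds a batch's capacity, so the
first request after onlining is forced down the refill path. A minimal sketch of
the consumer-side check, with simplified field names standing in for this
file's actual batched-entropy structures:

    #include <linux/kernel.h>
    #include <linux/random.h>

    struct batch {
            u32 entropy[16];          /* buffered random words */
            unsigned long generation; /* crng generation at fill time */
            unsigned int position;    /* next unread index */
    };

    static u32 batch_get_u32(struct batch *b, unsigned long current_gen)
    {
            /* An invalidated batch (position == UINT_MAX or generation
             * == ULONG_MAX) always takes the refill branch here. */
            if (b->position >= ARRAY_SIZE(b->entropy) ||
                b->generation != current_gen) {
                    get_random_bytes(b->entropy, sizeof(b->entropy));
                    b->position = 0;
                    b->generation = current_gen;
            }
            return b->entropy[b->position++];
    }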
@@ -1183,7 +1202,7 @@ struct fast_pool {
};
struct work_struct mix;
unsigned long last;
- atomic_t count;
+ unsigned int count;
u16 reg_idx;
};
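The atomic_t to unsigned int change above is what the rest of this patch makes
safe: afterwards, ->count is only touched from the CPU that owns the fast pool,
either in hard irq context (add_interrupt_randomness()), with irqs disabled
(mix_interrupt_randomness()), or from the hotplug callbacks while the CPU is
being brought up. A sketch of that single-writer pattern under those
assumptions, with illustrative names rather than this file's code:

    #include <linux/percpu.h>
    #include <linux/irqflags.h>

    static DEFINE_PER_CPU(unsigned int, events);

    /* Hard irq path: irqs are off, nothing else writes on this CPU. */
    static void on_hardirq(void)
    {
            this_cpu_inc(events);
    }

    /* Worker path: queued on the owning CPU, masks irqs while writing,
     * so a plain store cannot race the increment above. */
    static void on_worker(void)
    {
            local_irq_disable();
            __this_cpu_write(events, 0);
            local_irq_enable();
    }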
@@ -1219,6 +1238,29 @@ static void fast_mix(u32 pool[4])
static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU has just come online, with
+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
+ */
+int random_online_cpu(unsigned int cpu)
+{
+ /*
+ * During CPU shutdown and before CPU onlining, add_interrupt_
+ * randomness() may schedule mix_interrupt_randomness(), and
+ * set the MIX_INFLIGHT flag. However, because the worker can
+ * be scheduled on a different CPU during this period, that
+ * flag will never be cleared. For that reason, we zero out
+ * the flag here, which runs just after workqueues are onlined
+ * for the CPU again. This also has the effect of setting the
+ * irq randomness count to zero so that new accumulated irqs
+ * are fresh.
+ */
+ per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+ return 0;
+}
+#endif
+
static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
u32 *ptr = (u32 *)regs;
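For reference, the two callbacks above plug into the CPU hotplug state machine
through entries added outside this file (the diffstat here is limited to
drivers/char/random.c): CPUHP_RANDOM_PREPARE in the prepare section and
CPUHP_AP_RANDOM_ONLINE after CPUHP_AP_WORKQUEUE_ONLINE. Purely to illustrate
the prepare/online split, a hypothetical dynamic registration with the stock
cpuhp API would look roughly like this; the real patch uses static table
entries instead:

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    static int __init random_cpuhp_init(void)
    {
            int ret;

            /* Runs on a control CPU before the target CPU comes up. */
            ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "random:prepare",
                                    random_prepare_cpu, NULL);
            if (ret < 0)
                    return ret;

            /* Runs on the target CPU, after its workqueues are online. */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "random:online",
                                    random_online_cpu, NULL);
            return ret < 0 ? ret : 0;
    }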
@@ -1243,15 +1285,6 @@ static void mix_interrupt_randomness(struct work_struct *work)
local_irq_disable();
if (fast_pool != this_cpu_ptr(&irq_randomness)) {
local_irq_enable();
- /*
- * If we are unlucky enough to have been moved to another CPU,
- * during CPU hotplug while the CPU was shutdown then we set
- * our count to zero atomically so that when the CPU comes
- * back online, it can enqueue work again. The _release here
- * pairs with the atomic_inc_return_acquire in
- * add_interrupt_randomness().
- */
- atomic_set_release(&fast_pool->count, 0);
return;
}
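The block deleted above was the worker's recovery path for CPU hotplug: if a
CPU went down with mixing work queued, the work item could run elsewhere, so
the worker had to release ->count itself to let work be enqueued again. Now
that random_online_cpu() resets ->count (and with it MIX_INFLIGHT) during
bring-up, a migrated worker can simply bail out. Condensed from the code
around this hunk, the detection works because the work_struct is embedded in
the per-cpu pool:

    static void mix_interrupt_randomness(struct work_struct *work)
    {
            /* Recover the containing fast_pool; if it is not this CPU's
             * instance, the worker was migrated during hotplug. */
            struct fast_pool *fast_pool =
                    container_of(work, struct fast_pool, mix);

            local_irq_disable();
            if (fast_pool != this_cpu_ptr(&irq_randomness)) {
                    local_irq_enable();
                    return; /* random_online_cpu() resets ->count */
            }
            /* ... mix on the owning CPU with irqs still disabled ... */
    }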
@@ -1260,7 +1293,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
* consistent view, before we reenable irqs again.
*/
memcpy(pool, fast_pool->pool32, sizeof(pool));
- atomic_set(&fast_pool->count, 0);
+ fast_pool->count = 0;
fast_pool->last = jiffies;
local_irq_enable();
@@ -1296,14 +1329,13 @@ void add_interrupt_randomness(int irq)
}
fast_mix(fast_pool->pool32);
- /* The _acquire here pairs with the atomic_set_release in mix_interrupt_randomness(). */
- new_count = (unsigned int)atomic_inc_return_acquire(&fast_pool->count);
+ new_count = ++fast_pool->count;
if (unlikely(crng_init == 0)) {
if (new_count >= 64 &&
crng_pre_init_inject(fast_pool->pool32, sizeof(fast_pool->pool32),
true, true) > 0) {
- atomic_set(&fast_pool->count, 0);
+ fast_pool->count = 0;
fast_pool->last = now;
if (spin_trylock(&input_pool.lock)) {
_mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32));
@@ -1321,7 +1353,7 @@ void add_interrupt_randomness(int irq)
if (unlikely(!fast_pool->mix.func))
INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
- atomic_or(MIX_INFLIGHT, &fast_pool->count);
+ fast_pool->count |= MIX_INFLIGHT;
queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
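With the atomics gone, ->count packs two things into one plain per-cpu word:
MIX_INFLIGHT, the top bit (1U << 31 in this file), marks that mixing work is
already queued, while the low bits count accumulated interrupts. Abridged from
add_interrupt_randomness() above (the 64-event threshold appears in the hunk;
the one-second timeout is from the surrounding function):

    new_count = ++fast_pool->count;

    if (new_count & MIX_INFLIGHT)
            return;        /* a mixing worker is already queued */

    if (new_count < 64 && !time_after(now, fast_pool->last + HZ))
            return;        /* too few events, too soon to mix again */

    fast_pool->count |= MIX_INFLIGHT;    /* low bits keep counting */
    queue_work_on(raw_smp_processor_id(), system_highpri_wq,
                  &fast_pool->mix);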