author     David S. Miller <davem@davemloft.net>  2015-10-08 05:26:44 -0700
committer  David S. Miller <davem@davemloft.net>  2015-10-08 05:26:44 -0700
commit     df718423250c000ca4323a767cedc2f3219b685c (patch)
tree       3afa13e8acbdf49a9f8c1a7993065c3836997a01 /lib/random32.c
parent     28335a7445202a3d118145a07d9138e9881ebe18 (diff)
parent     3ad0040573b0c00f88488bc31958acd07a55ee2e (diff)
Merge branch 'bpf_random32'
Daniel Borkmann says:

====================
BPF/random32 updates

BPF update to split the prandom state apart, and to move the *once
helpers to the core. For details, please see individual patches.
Given the changes and since it's in the tree for quite some time,
net-next is a better choice in our opinion.

v1 -> v2:
- Make DO_ONCE() type-safe, remove the kvec helper. Credits go to
  Alexei Starovoitov for the __VA_ARGS__ hint, thanks!
- Add a comment to the DO_ONCE() helper as suggested by Alexei.
- Rework prandom_init_once() helper to the new API.
- Keep Alexei's Acked-by on the last patch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
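For context, DO_ONCE(func, ...) runs func with the supplied arguments at
most once per boot; because the arguments pass through __VA_ARGS__ into a
real function call, they are type-checked against the function's prototype
instead of being cast through void pointers. A minimal sketch of the
pattern (init_hash_secret, hash_secret and my_hash are hypothetical names,
not part of this series):

#include <linux/once.h>
#include <linux/random.h>

static u32 hash_secret;

/* Hypothetical one-time initializer; its prototype is type-checked. */
static void init_hash_secret(u32 entropy)
{
	hash_secret = entropy;
}

static u32 my_hash(u32 val)
{
	/* init_hash_secret() runs at most once across all callers. */
	DO_ONCE(init_hash_secret, get_random_int());
	return val ^ hash_secret;
}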
Diffstat (limited to 'lib/random32.c')
-rw-r--r--  lib/random32.c  37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/lib/random32.c b/lib/random32.c
index 0bee183fa18f..12111910ccd0 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -181,7 +181,7 @@ void prandom_seed(u32 entropy)
 	 * No locking on the CPUs, but then somewhat random results are, well,
 	 * expected.
 	 */
-	for_each_possible_cpu (i) {
+	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state, i);
 
 		state->s1 = __seed(state->s1 ^ entropy, 2U);
@@ -201,7 +201,7 @@ static int __init prandom_init(void)
 	prandom_state_selftest();
 
 	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
+		struct rnd_state *state = &per_cpu(net_rand_state, i);
 		u32 weak_seed = (i + jiffies) ^ random_get_entropy();
 
 		prandom_seed_early(state, weak_seed, true);
@@ -238,13 +238,30 @@ static void __init __prandom_start_seed_timer(void)
 	add_timer(&seed_timer);
 }
 
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
+		u32 seeds[4];
+
+		get_random_bytes(&seeds, sizeof(seeds));
+		state->s1 = __seed(seeds[0], 2U);
+		state->s2 = __seed(seeds[1], 8U);
+		state->s3 = __seed(seeds[2], 16U);
+		state->s4 = __seed(seeds[3], 128U);
+
+		prandom_warmup(state);
+	}
+}
+
 /*
  * Generate better values after random number generator
  * is fully initialized.
  */
 static void __prandom_reseed(bool late)
 {
-	int i;
 	unsigned long flags;
 	static bool latch = false;
 	static DEFINE_SPINLOCK(lock);
@@ -266,19 +283,7 @@ static void __prandom_reseed(bool late)
 		goto out;
 
 	latch = true;
-
-	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
-		u32 seeds[4];
-
-		get_random_bytes(&seeds, sizeof(seeds));
-		state->s1 = __seed(seeds[0], 2U);
-		state->s2 = __seed(seeds[1], 8U);
-		state->s3 = __seed(seeds[2], 16U);
-		state->s4 = __seed(seeds[3], 128U);
-
-		prandom_warmup(state);
-	}
+	prandom_seed_full_state(&net_rand_state);
 out:
 	spin_unlock_irqrestore(&lock, flags);
 }
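Since prandom_seed_full_state() now takes a struct rnd_state __percpu *
argument, callers outside lib/random32.c can fully seed a private per-cpu
PRNG state of their own; in this series that is wrapped by the
prandom_init_once() helper, which invokes it through DO_ONCE(). A minimal
sketch of a hypothetical caller (foo_rand_state and foo_random_u32 are
illustrative names, not part of the patch):

#include <linux/percpu.h>
#include <linux/random.h>

static DEFINE_PER_CPU(struct rnd_state, foo_rand_state);

static u32 foo_random_u32(void)
{
	struct rnd_state *state;
	u32 res;

	/* Fully seeds every CPU's state from get_random_bytes(), once. */
	prandom_init_once(&foo_rand_state);

	/* Pin this CPU so its state isn't used concurrently. */
	state = get_cpu_ptr(&foo_rand_state);
	res = prandom_u32_state(state);
	put_cpu_ptr(&foo_rand_state);

	return res;
}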