--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1211,6 +1211,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
+ this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
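For reference, the per-CPU state being nudged here feeds the kernel's non-cryptographic prandom generator. A rough, self-contained sketch of its output step (paraphrased from the TAUSWORTHE() macro in lib/random32.c; the exact shape of the real code differs and may vary between kernel versions) shows why folding interrupt-pool bytes into s1 perturbs every subsequent value:

#include <stdint.h>

struct rnd_state {
	uint32_t s1, s2, s3, s4;
};

/* Sketch of the lfsr113 step behind prandom_u32_state(): each call advances
 * all four words and XORs them together, so extra bits added to s1 by
 * add_interrupt_randomness() immediately alter the output stream. */
static uint32_t prandom_u32_state(struct rnd_state *state)
{
	state->s1 = ((state->s1 & 4294967294U) << 18) ^ (((state->s1 << 6)  ^ state->s1) >> 13);
	state->s2 = ((state->s2 & 4294967288U) << 2)  ^ (((state->s2 << 2)  ^ state->s2) >> 27);
	state->s3 = ((state->s3 & 4294967280U) << 7)  ^ (((state->s3 << 13) ^ state->s3) >> 21);
	state->s4 = ((state->s4 & 4294967168U) << 13) ^ (((state->s4 << 3)  ^ state->s4) >> 12);

	return state->s1 ^ state->s2 ^ state->s3 ^ state->s4;
}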
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/once.h>
+#include <linux/percpu.h>
#include <uapi/linux/random.h>
@@ -55,6 +56,8 @@ struct rnd_state {
__u32 s1, s2, s3, s4;
};
+DECLARE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+
u32 prandom_u32_state(struct rnd_state *state);
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
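The DECLARE_PER_CPU() added above is what lets drivers/char/random.c and kernel/time/timer.c reach the generator state directly. That state is consumed by prandom_u32(); roughly, as paraphrased from lib/random32.c of this era (shown only to make the data flow explicit, not part of the patch), the consumer side looks like:

u32 prandom_u32(void)
{
	/* Grab this CPU's copy of the state the patch keeps perturbing. */
	struct rnd_state *state = &get_cpu_var(net_rand_state);
	u32 res = prandom_u32_state(state);

	put_cpu_var(net_rand_state);
	return res;
}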
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -42,6 +42,7 @@
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -1635,6 +1636,13 @@ void update_process_times(int user_tick)
#endif
scheduler_tick();
run_posix_cpu_timers(p);
+
+ /* The current CPU might make use of net randoms without receiving IRQs
+ * to renew them often enough. Let's update the net_rand_state from a
+ * non-constant value that's not affine to the number of calls to make
+ * sure it's updated when there's some activity (we don't care in idle).
+ */
+ this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
}
/**
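A note on the mixing value above: rol32(jiffies, 24) rotates the rapidly changing low-order bits of jiffies into the upper bits of the word added to s1, so consecutive ticks change the state by far more than +1, and adding user_tick (0 or 1) makes the increment depend on whether the tick interrupted user code. For readers without the kernel tree at hand, rol32() is simply a 32-bit rotate left, approximately (a sketch, not the exact include/linux/bitops.h source):

#include <stdint.h>

/* 32-bit rotate left; a shift of 24 moves jiffies' fast-moving low byte
 * into the top byte of the value added to net_rand_state.s1. */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((32 - shift) & 31));
}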
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
}
#endif
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
/**
* prandom_u32_state - seeded pseudo-random number generator.
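The only change needed in lib/random32.c itself is dropping static: together with the DECLARE_PER_CPU() added to <linux/random.h> above, this gives net_rand_state external linkage, so the this_cpu_add() calls in drivers/char/random.c and kernel/time/timer.c resolve to this per-CPU variable. As a minimal, hypothetical illustration of that define/declare/update split (identifiers invented for this sketch, not taken from the patch):

/* include/linux/foo.h: make the per-CPU variable visible to other files. */
DECLARE_PER_CPU(u32, foo_noise);

/* lib/foo.c: the single definition; no "static", so the symbol is global. */
DEFINE_PER_CPU(u32, foo_noise);

/* any other compilation unit: cheap, lockless update of the local CPU's
 * copy, safe even from interrupt context. */
this_cpu_add(foo_noise, 1);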