WIP: random32: add noise from network and scheduling activity

Replace the direct perturbation of net_rand_state.s1 with a dedicated
per-CPU accumulator, net_rand_noise, that cheap hot paths can feed: the
prandom seeding path, the timer tick (rol32(jiffies, 24) + user_tick)
and the network transmit paths (skb/dev/txq pointer values, frame
length, jiffies). siprand_u32() then folds the accumulated noise into
its SipHash state on every call. Also fix the BITS_PER_LONG typo
(23 -> 32) in the 32-bit HSipHash section.

diff --git a/drivers/char/random.c b/drivers/char/random.c
index d20ba1b..2a41b21 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1277,7 +1277,6 @@
fast_mix(fast_pool);
add_interrupt_bench(cycles);
- this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index aa16e64..e2b4990 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -20,7 +20,7 @@
__u32 s1, s2, s3, s4;
};
-DECLARE_PER_CPU(struct rnd_state, net_rand_state);
+DECLARE_PER_CPU(unsigned long, net_rand_noise);
u32 prandom_u32_state(struct rnd_state *state);
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
@@ -67,6 +67,7 @@
state->s2 = __seed(i, 8U);
state->s3 = __seed(i, 16U);
state->s4 = __seed(i, 128U);
+ __this_cpu_add(net_rand_noise, i);
}
/* Pseudo random number generator from numerical recipes. */
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index ae5029f..2f07c56 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1721,7 +1721,7 @@
* non-constant value that's not affine to the number of calls to make
* sure it's updated when there's some activity (we don't care in idle).
*/
- this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
+ __this_cpu_add(net_rand_noise, rol32(jiffies, 24) + user_tick);
}
/**
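
For reference, the noise fed from the timer tick above is rol32(jiffies, 24) + user_tick: the jiffies counter rotated left by 24 bits, plus the 0/1 flag telling whether the tick interrupted user space. A tiny stand-alone illustration of that term, assuming nothing beyond standard C (rol32() is reimplemented here; in the kernel it comes from <linux/bitops.h>, and the jiffies values are made up):

/* User-space illustration of the timer-tick noise term. */
#include <stdint.h>
#include <stdio.h>

/* Same rotation as the kernel's rol32() from <linux/bitops.h>. */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

int main(void)
{
	/* Made-up jiffies values over a few consecutive ticks. */
	for (uint32_t jiffies = 0xfffe1234u; jiffies < 0xfffe1238u; jiffies++) {
		unsigned int user_tick = jiffies & 1;	/* arbitrary 0/1 flag */

		printf("jiffies=%08x -> noise term %08x\n",
		       (unsigned)jiffies,
		       (unsigned)(rol32(jiffies, 24) + user_tick));
	}
	return 0;
}
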
diff --git a/lib/random32.c b/lib/random32.c
index 2b048e2..d74cf1d 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -320,6 +320,8 @@
};
static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(unsigned long, net_rand_noise);
+EXPORT_SYMBOL(net_rand_noise);
#if BITS_PER_LONG == 64
/*
@@ -334,7 +336,7 @@
#define K0 (0x736f6d6570736575 ^ 0x6c7967656e657261 )
#define K1 (0x646f72616e646f6d ^ 0x7465646279746573 )
-#elif BITS_PER_LONG == 23
+#elif BITS_PER_LONG == 32
/*
* On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
* This is weaker, but 32-bit machines are not used for high-traffic
@@ -374,9 +376,12 @@
static u32 siprand_u32(struct siprand_state *s)
{
unsigned long v0 = s->v[0], v1 = s->v[1], v2 = s->v[2], v3 = s->v[3];
+ unsigned long n = __this_cpu_read(net_rand_noise);
+ v3 ^= n;
SIPROUND(v0, v1, v2, v3);
SIPROUND(v0, v1, v2, v3);
+ v0 ^= n;
s->v[0] = v0; s->v[1] = v1; s->v[2] = v2; s->v[3] = v3;
return v1 + v3;
}
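
The hunk above is the consumption side: the accumulated per-CPU noise is XORed into v3 before the two SIPROUNDs and into v0 afterwards, so the extra cost per output word is one per-cpu read and two XORs. Below is a minimal user-space sketch of that folding pattern for the 64-bit case, with plain globals standing in for the per-CPU net_rand_noise and net_rand_state, rotl64() standing in for the kernel's rol64(), and arbitrary toy seed values; the round itself is the standard SipHash round:

/*
 * Stand-alone sketch of the noise fold in siprand_u32(), 64-bit variant.
 * Illustration only -- not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t noise;                  /* stand-in for net_rand_noise */
static uint64_t v[4] = { 1, 2, 3, 4 };  /* stand-in for net_rand_state */

static inline uint64_t rotl64(uint64_t x, int r)
{
	return (x << r) | (x >> (64 - r));
}

#define SIPROUND(a, b, c, d) do {					\
	a += b; b = rotl64(b, 13); b ^= a; a = rotl64(a, 32);		\
	c += d; d = rotl64(d, 16); d ^= c;				\
	a += d; d = rotl64(d, 21); d ^= a;				\
	c += b; b = rotl64(b, 17); b ^= c; c = rotl64(c, 32);		\
} while (0)

static uint32_t toy_prandom_u32(void)
{
	uint64_t v0 = v[0], v1 = v[1], v2 = v[2], v3 = v[3];
	uint64_t n = noise;

	v3 ^= n;			/* fold noise into the input half */
	SIPROUND(v0, v1, v2, v3);
	SIPROUND(v0, v1, v2, v3);
	v0 ^= n;			/* ...and into the stored state */
	v[0] = v0; v[1] = v1; v[2] = v2; v[3] = v3;
	return (uint32_t)(v1 + v3);
}

int main(void)
{
	noise += 0xdeadbeef;		/* what the tx/timer hooks would do */
	printf("%08x\n", (unsigned)toy_prandom_u32());
	noise += 0x12345678;
	printf("%08x\n", (unsigned)toy_prandom_u32());
	return 0;
}

The pre-round fold lets the noise influence the value returned by the current call (after passing through the two rounds), while the post-round fold only perturbs the state kept for subsequent calls.
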
diff --git a/net/core/dev.c b/net/core/dev.c
index 7df6c96..55a2471 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -144,6 +144,7 @@
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
+#include <linux/prandom.h>
#include "net-sysfs.h"
@@ -3557,6 +3558,7 @@
dev_queue_xmit_nit(skb, dev);
len = skb->len;
+ __this_cpu_add(net_rand_noise, (long)skb + (long)dev + (long)txq + len + jiffies);
trace_net_dev_start_xmit(skb, dev);
rc = netdev_start_xmit(skb, dev, txq, more);
trace_net_dev_xmit(skb, rc, dev, len);
@@ -4129,6 +4131,7 @@
if (!skb)
goto out;
+ __this_cpu_add(net_rand_noise, (long)skb + (long)dev + (long)txq + jiffies);
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
@@ -4194,6 +4197,7 @@
skb_set_queue_mapping(skb, queue_id);
txq = skb_get_tx_queue(dev, skb);
+ __this_cpu_add(net_rand_noise, (long)skb + (long)dev + (long)txq + jiffies);
local_bh_disable();
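
The three transmit-path hooks above follow one pattern: sum a few values that are already at hand (the skb, device and queue pointers, the frame length, jiffies) into the per-CPU accumulator with __this_cpu_add(). As a purely hypothetical further call site, shown only to illustrate the pattern (the function and its placement are made up; only net_rand_noise and the accessor idiom come from this patch):

/*
 * Hypothetical example -- not part of this patch.  Another hot path
 * feeding the same accumulator with values it already has on hand.
 */
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/prandom.h>
#include <linux/skbuff.h>

static inline void example_rx_noise(const struct sk_buff *skb,
				    const struct net_device *dev)
{
	__this_cpu_add(net_rand_noise, (long)skb + (long)dev + jiffies);
}

The double-underscore accessor is the cheap per-cpu variant that assumes the caller already runs with preemption disabled.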