Diffstat (limited to 'drivers/char/random.c'):
 drivers/char/random.c | 137
 1 file changed, 82 insertions(+), 55 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ffd61aadb761..19bfbaf13598 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -497,6 +497,7 @@ static struct crng_state primary_crng = {
* its value (from 0->1->2).
*/
static int crng_init = 0;
+static bool crng_need_final_init = false;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
@@ -889,6 +890,38 @@ static void crng_initialize(struct crng_state *crng)
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
+static void crng_finalize_init(struct crng_state *crng)
+{
+ if (crng != &primary_crng || crng_init >= 2)
+ return;
+ if (!system_wq) {
+ /* We can't call numa_crng_init until we have workqueues,
+ * so mark this for processing later. */
+ crng_need_final_init = true;
+ return;
+ }
+
+ invalidate_batched_entropy();
+ numa_crng_init();
+ crng_init = 2;
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ pr_notice("crng init done\n");
+ if (unseeded_warning.missed) {
+ pr_notice("random: %d get_random_xx warning(s) missed "
+ "due to ratelimiting\n",
+ unseeded_warning.missed);
+ unseeded_warning.missed = 0;
+ }
+ if (urandom_warning.missed) {
+ pr_notice("random: %d urandom warning(s) missed "
+ "due to ratelimiting\n",
+ urandom_warning.missed);
+ urandom_warning.missed = 0;
+ }
+}
+
#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
@@ -904,8 +937,8 @@ static void do_numa_crng_init(struct work_struct *work)
crng_initialize(crng);
pool[i] = crng;
}
- mb();
- if (cmpxchg(&crng_node_pool, NULL, pool)) {
+ /* pairs with READ_ONCE() in select_crng() */
+ if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
for_each_node(i)
kfree(pool[i]);
kfree(pool);
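
The "pairs with" comments above carry the whole ordering contract between do_numa_crng_init() and select_crng(): the pool pointer may only become visible after every per-node crng has been initialized. A standalone C11 sketch of that publish/look-up pattern, with illustrative names (publish_pool(), lookup_pool()) rather than the kernel's; the kernel reader actually relies on the address dependency carried by READ_ONCE(), which the sketch approximates with an acquire load:

/* Illustrative C11 model of the crng_node_pool publication pattern;
 * names and types are hypothetical, not kernel symbols. */
#include <stdatomic.h>
#include <stdlib.h>

struct pool { int data; };

static _Atomic(struct pool *) node_pool;   /* plays the role of crng_node_pool */

static void publish_pool(void)
{
	struct pool *p = malloc(sizeof(*p));
	if (!p)
		return;
	p->data = 42;                       /* fully initialize before publishing */

	struct pool *expected = NULL;
	/* Release ordering makes the initialization above visible to any
	 * reader that observes the new pointer (cmpxchg_release() in the hunk). */
	if (!atomic_compare_exchange_strong_explicit(&node_pool, &expected, p,
						     memory_order_release,
						     memory_order_relaxed))
		free(p);                    /* lost the race; someone else published */
}

static struct pool *lookup_pool(struct pool *fallback)
{
	/* Acquire load stands in for READ_ONCE() plus its address dependency. */
	struct pool *p = atomic_load_explicit(&node_pool, memory_order_acquire);
	return p ? p : fallback;
}
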
@@ -918,18 +951,38 @@ static void numa_crng_init(void)
{
schedule_work(&numa_crng_init_work);
}
+
+static struct crng_state *select_crng(void)
+{
+ struct crng_state **pool;
+ int nid = numa_node_id();
+
+ /* pairs with cmpxchg_release() in do_numa_crng_init() */
+ pool = READ_ONCE(crng_node_pool);
+ if (pool && pool[nid])
+ return pool[nid];
+
+ return &primary_crng;
+}
#else
static void numa_crng_init(void) {}
+
+static struct crng_state *select_crng(void)
+{
+ return &primary_crng;
+}
#endif
/*
* crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally.
+ * path. So we can't afford to dilly-dally. Returns the number of
+ * bytes processed from cp.
*/
-static int crng_fast_load(const char *cp, size_t len)
+static size_t crng_fast_load(const char *cp, size_t len)
{
unsigned long flags;
char *p;
+ size_t ret = 0;
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
@@ -940,7 +993,7 @@ static int crng_fast_load(const char *cp, size_t len)
p = (unsigned char *) &primary_crng.state[4];
while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
- cp++; crng_init_cnt++; len--;
+ cp++; crng_init_cnt++; len--; ret++;
}
spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
@@ -949,7 +1002,7 @@ static int crng_fast_load(const char *cp, size_t len)
wake_up_interruptible(&crng_init_wait);
pr_notice("random: fast init done\n");
}
- return 1;
+ return ret;
}
/*
@@ -1024,39 +1077,23 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
crng->state[i+4] ^= buf.key[i] ^ rv;
}
memzero_explicit(&buf, sizeof(buf));
- crng->init_time = jiffies;
+ WRITE_ONCE(crng->init_time, jiffies);
spin_unlock_irqrestore(&crng->lock, flags);
- if (crng == &primary_crng && crng_init < 2) {
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- process_random_ready_list();
- wake_up_interruptible(&crng_init_wait);
- pr_notice("random: crng init done\n");
- if (unseeded_warning.missed) {
- pr_notice("random: %d get_random_xx warning(s) missed "
- "due to ratelimiting\n",
- unseeded_warning.missed);
- unseeded_warning.missed = 0;
- }
- if (urandom_warning.missed) {
- pr_notice("random: %d urandom warning(s) missed "
- "due to ratelimiting\n",
- urandom_warning.missed);
- urandom_warning.missed = 0;
- }
- }
+ crng_finalize_init(crng);
}
static void _extract_crng(struct crng_state *crng,
__u8 out[CHACHA_BLOCK_SIZE])
{
- unsigned long v, flags;
+ unsigned long v, flags, init_time;
- if (crng_ready() &&
- (time_after(crng_global_init_time, crng->init_time) ||
- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
- crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
+ if (crng_ready()) {
+ init_time = READ_ONCE(crng->init_time);
+ if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
+ time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
+ crng_reseed(crng, crng == &primary_crng ?
+ &input_pool : NULL);
+ }
spin_lock_irqsave(&crng->lock, flags);
if (arch_get_random_long(&v))
crng->state[14] ^= v;
@@ -1068,15 +1105,7 @@ static void _extract_crng(struct crng_state *crng,
static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
- struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
- if (crng_node_pool)
- crng = crng_node_pool[numa_node_id()];
- if (crng == NULL)
-#endif
- crng = &primary_crng;
- _extract_crng(crng, out);
+ _extract_crng(select_crng(), out);
}
/*
@@ -1105,15 +1134,7 @@ static void _crng_backtrack_protect(struct crng_state *crng,
static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
- struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
- if (crng_node_pool)
- crng = crng_node_pool[numa_node_id()];
- if (crng == NULL)
-#endif
- crng = &primary_crng;
- _crng_backtrack_protect(crng, tmp, used);
+ _crng_backtrack_protect(select_crng(), tmp, used);
}
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
@@ -1334,7 +1355,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
crng_fast_load((char *) fast_pool->pool,
- sizeof(fast_pool->pool))) {
+ sizeof(fast_pool->pool)) > 0) {
fast_pool->count = 0;
fast_pool->last = now;
}
@@ -1957,6 +1978,8 @@ int __init rand_initialize(void)
{
init_std_data(&input_pool);
init_std_data(&blocking_pool);
+ if (crng_need_final_init)
+ crng_finalize_init(&primary_crng);
crng_initialize(&primary_crng);
crng_global_init_time = jiffies;
if (ratelimit_disable) {
@@ -2150,7 +2173,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
if (crng_init < 2)
return -ENODATA;
crng_reseed(&primary_crng, &input_pool);
- crng_global_init_time = jiffies - 1;
+ WRITE_ONCE(crng_global_init_time, jiffies - 1);
return 0;
default:
return -EINVAL;
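
For reference, the RNDRESEEDCRNG path touched above can be exercised from userspace. A minimal sketch, assuming UAPI headers new enough to define RNDRESEEDCRNG (v4.17+) and a caller with CAP_SYS_ADMIN:

/* Force an immediate reseed of the crng via RNDRESEEDCRNG. */
#include <fcntl.h>
#include <linux/random.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/random", O_RDWR);
	if (fd < 0) {
		perror("open /dev/random");
		return 1;
	}
	/* Fails with ENODATA if crng_init < 2, per the check in random_ioctl(). */
	if (ioctl(fd, RNDRESEEDCRNG) < 0)
		perror("ioctl RNDRESEEDCRNG");
	close(fd);
	return 0;
}
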
@@ -2480,15 +2503,19 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
struct entropy_store *poolp = &input_pool;
if (unlikely(crng_init == 0)) {
- crng_fast_load(buffer, count);
- return;
+ size_t ret = crng_fast_load(buffer, count);
+ count -= ret;
+ buffer += ret;
+ if (!count || crng_init == 0)
+ return;
}
/* Suspend writing if we're above the trickle threshold.
* We'll be woken up again once below random_write_wakeup_thresh,
* or when the calling thread is about to terminate.
*/
- wait_event_interruptible(random_write_wait, kthread_should_stop() ||
+ wait_event_interruptible(random_write_wait,
+ !system_wq || kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
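
Taken together, crng_need_final_init, the !system_wq bail-out in crng_finalize_init(), the new call in rand_initialize(), and the extra !system_wq condition in the wait above form one "finish later" path for entropy that arrives before workqueues exist. A minimal standalone model of that flow, with hypothetical names (workqueues_up, pending_finalize, init_level) standing in for the kernel's state:

/* Standalone model of the "finalize now, or flag it and finish in
 * rand_initialize()" pattern added by this patch; all names are
 * hypothetical stand-ins, not kernel symbols. */
#include <stdbool.h>
#include <stdio.h>

static bool workqueues_up;     /* models system_wq becoming non-NULL */
static bool pending_finalize;  /* models crng_need_final_init        */
static int  init_level;        /* models crng_init (0 -> 1 -> 2)     */

static void finalize(void)
{
	if (init_level >= 2)
		return;
	if (!workqueues_up) {
		/* Too early to schedule numa_crng_init(); remember the debt. */
		pending_finalize = true;
		return;
	}
	init_level = 2;
	puts("crng finalized");
}

int main(void)
{
	finalize();                 /* early seed: only records the flag    */
	printf("pending=%d level=%d\n", pending_finalize, init_level);

	workqueues_up = true;       /* later, at rand_initialize() time     */
	if (pending_finalize)
		finalize();         /* the deferred finalization completes  */
	printf("pending=%d level=%d\n", pending_finalize, init_level);
	return 0;
}
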