Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--  drivers/char/random.c  101
1 files changed, 81 insertions, 20 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 01a260f67437..afa3ce7d3e72 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -288,7 +288,6 @@
#define SEC_XFER_SIZE 512
#define EXTRACT_SIZE 10
-#define DEBUG_RANDOM_BOOT 0
#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
@@ -437,6 +436,7 @@ static void _extract_crng(struct crng_state *crng,
static void _crng_backtrack_protect(struct crng_state *crng,
__u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
+static void _get_random_bytes(void *buf, int nbytes);
/**********************************************************************
*
@@ -777,7 +777,7 @@ static void crng_initialize(struct crng_state *crng)
_extract_entropy(&input_pool, &crng->state[4],
sizeof(__u32) * 12, 0);
else
- get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
@@ -851,11 +851,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
}
-static inline void crng_wait_ready(void)
-{
- wait_event_interruptible(crng_init_wait, crng_ready());
-}
-
static void _extract_crng(struct crng_state *crng,
__u8 out[CHACHA20_BLOCK_SIZE])
{
@@ -987,6 +982,11 @@ void add_device_randomness(const void *buf, unsigned int size)
unsigned long time = random_get_entropy() ^ jiffies;
unsigned long flags;
+ if (!crng_ready()) {
+ crng_fast_load(buf, size);
+ return;
+ }
+
trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(&input_pool, buf, size);
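With this hunk, data handed to add_device_randomness() before the CRNG is ready is fed straight into crng_fast_load() rather than only being mixed into the input pool. A minimal sketch of a caller, assuming a hypothetical driver and helper (example_read_serial and struct example_device are illustrative, not from this patch):

#include <linux/random.h>

/* Hypothetical probe routine: mix device identifiers (credited with no
 * entropy) into the pool early; before the CRNG is ready this now goes
 * through crng_fast_load() as shown above.
 */
static int example_probe(struct example_device *dev)
{
	u8 serial[16];

	example_read_serial(dev, serial);		/* hypothetical helper */
	add_device_randomness(serial, sizeof(serial));
	return 0;
}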
@@ -1472,22 +1472,44 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
return ret;
}
+#define warn_unseeded_randomness(previous) \
+ _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+
+static void _warn_unseeded_randomness(const char *func_name, void *caller,
+ void **previous)
+{
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ const bool print_once = false;
+#else
+ static bool print_once __read_mostly;
+#endif
+
+ if (print_once ||
+ crng_ready() ||
+ (previous && (caller == READ_ONCE(*previous))))
+ return;
+ WRITE_ONCE(*previous, caller);
+#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ print_once = true;
+#endif
+ pr_notice("random: %s called from %pF with crng_init=%d\n",
+ func_name, caller, crng_init);
+}
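warn_unseeded_randomness() records the caller's return address in a per-entry-point static pointer, so a given unseeded caller is reported once rather than on every call; without CONFIG_WARN_ALL_UNSEEDED_RANDOM the static print_once flag further limits output to a single notice overall. A sketch of the wiring each entry point in this file is expected to carry (the patch adds exactly this to get_random_bytes() below; the helper name here is illustrative):

static void example_random_helper(void *buf, int nbytes)	/* hypothetical */
{
	static void *previous;	/* one per entry point, remembers the last warned caller */

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}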
+
/*
* This function is the exported kernel interface. It returns some
* number of good random numbers, suitable for key generation, seeding
* TCP sequence numbers, etc. It does not rely on the hardware random
* number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch().
+ * (when available), use get_random_bytes_arch(). In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
*/
-void get_random_bytes(void *buf, int nbytes)
+static void _get_random_bytes(void *buf, int nbytes)
{
__u8 tmp[CHACHA20_BLOCK_SIZE];
-#if DEBUG_RANDOM_BOOT > 0
- if (!crng_ready())
- printk(KERN_NOTICE "random: %pF get_random_bytes called "
- "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
-#endif
trace_get_random_bytes(nbytes, _RET_IP_);
while (nbytes >= CHACHA20_BLOCK_SIZE) {
@@ -1504,9 +1526,35 @@ void get_random_bytes(void *buf, int nbytes)
crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
memzero_explicit(tmp, sizeof(tmp));
}
+
+void get_random_bytes(void *buf, int nbytes)
+{
+ static void *previous;
+
+ warn_unseeded_randomness(&previous);
+ _get_random_bytes(buf, nbytes);
+}
EXPORT_SYMBOL(get_random_bytes);
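A minimal sketch of an in-kernel consumer of the exported wrapper (the function name is illustrative, not from this patch):

/* Draws key bytes immediately; if the CRNG is still unseeded, the
 * wrapper above emits the "called from ... with crng_init=%d" notice.
 */
static void example_make_key(u8 *key, int len)
{
	get_random_bytes(key, len);
}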
/*
+ * Wait for the urandom pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the urandom pool has been seeded.
+ * -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+ if (likely(crng_ready()))
+ return 0;
+ return wait_event_interruptible(crng_init_wait, crng_ready());
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
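The pattern the two comments above describe, sketched under the assumption of a sleepable context such as a module init or ioctl handler (names are illustrative):

static int example_get_seeded_bytes(void *buf, int nbytes)
{
	int ret;

	ret = wait_for_random_bytes();	/* blocks until the CRNG is seeded */
	if (unlikely(ret))
		return ret;		/* -ERESTARTSYS: interrupted by a signal */

	get_random_bytes(buf, nbytes);	/* now guaranteed to be well seeded */
	return 0;
}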
+
+/*
* Add a callback function that will be invoked when the nonblocking
* pool is initialised.
*
@@ -1860,6 +1908,8 @@ const struct file_operations urandom_fops = {
SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
unsigned int, flags)
{
+ int ret;
+
if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
return -EINVAL;
@@ -1872,9 +1922,9 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
if (!crng_ready()) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
- crng_wait_ready();
- if (signal_pending(current))
- return -ERESTARTSYS;
+ ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
}
return urandom_read(NULL, buf, count, NULL);
}
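A userspace sketch of the two paths this hunk implements: with GRND_NONBLOCK the call fails with EAGAIN until the CRNG is seeded, and without the flag it now blocks in wait_for_random_bytes() and can be interrupted by a signal. Assumes a glibc (2.25+) that provides the getrandom() wrapper; the program is illustrative and not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	unsigned char buf[16];

	if (getrandom(buf, sizeof(buf), GRND_NONBLOCK) < 0 && errno == EAGAIN)
		printf("CRNG not yet seeded; a flag-less call would block\n");

	if (getrandom(buf, sizeof(buf), 0) < 0)	/* may block until seeded */
		perror("getrandom");
	return 0;
}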
@@ -2035,15 +2085,19 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
/*
* Get a random word for internal kernel use only. The quality of the random
* number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy.
+ * goal of being quite fast and not depleting entropy. In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
u64 ret;
- bool use_lock = READ_ONCE(crng_init) < 2;
+ bool use_lock;
unsigned long flags = 0;
struct batched_entropy *batch;
+ static void *previous;
#if BITS_PER_LONG == 64
if (arch_get_random_long((unsigned long *)&ret))
@@ -2054,6 +2108,9 @@ u64 get_random_u64(void)
return ret;
#endif
+ warn_unseeded_randomness(&previous);
+
+ use_lock = READ_ONCE(crng_init) < 2;
batch = &get_cpu_var(batched_entropy_u64);
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
@@ -2073,13 +2130,17 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
u32 get_random_u32(void)
{
u32 ret;
- bool use_lock = READ_ONCE(crng_init) < 2;
+ bool use_lock;
unsigned long flags = 0;
struct batched_entropy *batch;
+ static void *previous;
if (arch_get_random_int(&ret))
return ret;
+ warn_unseeded_randomness(&previous);
+
+ use_lock = READ_ONCE(crng_init) < 2;
batch = &get_cpu_var(batched_entropy_u32);
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
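The same caveat now applies to the batched helpers: get_random_u32()/get_random_u64() stay fast and do not deplete entropy, but their output is only trustworthy once wait_for_random_bytes() has returned 0, and each carries its own per-call-site "previous" pointer for the unseeded-use notice. A brief sketch of a consumer (illustrative name, not from this patch):

/* Picks a random timer jitter in jiffies; fine for non-cryptographic
 * uses even before the CRNG is seeded, though such a call may trigger
 * the warn_unseeded_randomness() notice added above.
 */
static unsigned long example_random_jitter(void)
{
	return get_random_u32() % HZ;
}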