author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2017-05-12 15:46:17 +0200
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2018-10-29 18:20:20 +0100
commit    8cedb96a0df6c976bcd4bc3f20d7cdec07593628 (patch)
tree      5322ea4f7a67a490d4ab89136447eae48c4c45dd /drivers/char
parent    fc79dea1e61b8bf477f5b7581db063dfea51ec3c (diff)
random: avoid preempt_disable()ed section
extract_crng() will use sleeping locks while in a preempt_disable()
section due to get_cpu_var().
Work around it with local_locks.

Cc: stable-rt@vger.kernel.org # where it applies to
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
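To see why this matters on PREEMPT_RT, where spinlocks are sleeping
rt_mutex-based locks: the pre-patch shape of the fast path (hand-simplified
here, not the literal tree contents) disables preemption around a refill
that may sleep:

	/* Simplified pre-patch get_random_u64() on PREEMPT_RT. */
	batch = &get_cpu_var(batched_entropy_u64);	/* preempt_disable() */
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		/* extract_crng() takes locks that sleep on RT; with
		 * preemption disabled this trips might_sleep():
		 * "BUG: sleeping function called from invalid context". */
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	put_cpu_var(batched_entropy_u64);		/* preempt_enable() */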
Diffstat (limited to 'drivers/char')
-rw-r--r--	drivers/char/random.c	| 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4c20da67edd5..91c1972b6a17 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -265,6 +265,7 @@
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
+#include <linux/locallock.h>
#include <crypto/chacha20.h>
#include <asm/processor.h>
@@ -2223,6 +2224,7 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
* at any point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock);
u64 get_random_u64(void)
{
u64 ret;
@@ -2243,7 +2245,7 @@ u64 get_random_u64(void)
warn_unseeded_randomness(&previous);
use_lock = READ_ONCE(crng_init) < 2;
- batch = &get_cpu_var(batched_entropy_u64);
+ batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
@@ -2253,12 +2255,13 @@ u64 get_random_u64(void)
ret = batch->entropy_u64[batch->position++];
if (use_lock)
read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
- put_cpu_var(batched_entropy_u64);
+ put_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
return ret;
}
EXPORT_SYMBOL(get_random_u64);
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u32_lock);
u32 get_random_u32(void)
{
u32 ret;
@@ -2273,7 +2276,7 @@ u32 get_random_u32(void)
warn_unseeded_randomness(&previous);
use_lock = READ_ONCE(crng_init) < 2;
- batch = &get_cpu_var(batched_entropy_u32);
+ batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
@@ -2283,7 +2286,7 @@ u32 get_random_u32(void)
ret = batch->entropy_u32[batch->position++];
if (use_lock)
read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
- put_cpu_var(batched_entropy_u32);
+ put_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
return ret;
}
EXPORT_SYMBOL(get_random_u32);
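For readers without the RT tree handy, the helpers used above resolve
roughly as follows (a simplified sketch of the RT patch set's
<linux/locallock.h>; exact definitions vary across RT releases):

	#ifdef CONFIG_PREEMPT_RT_BASE
	/* RT: the local lock is a per-CPU sleeping spinlock. Holding it
	 * pins the task to this CPU's batch without disabling preemption,
	 * so the sleeping locks taken by extract_crng() become legal. */
	# define get_locked_var(lvar, var)		\
		(*({					\
			local_lock(lvar);		\
			this_cpu_ptr(&var);		\
		}))
	# define put_locked_var(lvar, var)	local_unlock(lvar)
	#else
	/* !RT: the lock compiles away and the old behaviour is kept. */
	# define get_locked_var(lvar, var)	get_cpu_var(var)
	# define put_locked_var(lvar, var)	put_cpu_var(var)
	#endif

Giving the u64 and u32 batches their own local locks mirrors the existing
per-variable get_cpu_var() scoping, so the two paths never serialize
against each other on RT.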