From e455fbaab7bf46ddde36f3ede5363e03327a316e Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 12 May 2017 15:46:17 +0200
Subject: [PATCH] random: avoid preempt_disable()ed section

extract_crng() takes a spinlock_t, which is a sleeping lock on
PREEMPT_RT, while get_random_u64() and get_random_u32() have preemption
disabled by get_cpu_var(). Sleeping locks must not be acquired with
preemption disabled.
Work around it with local_locks: a local lock still serializes access
to the per-CPU batched entropy, but the section stays preemptible on RT.
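
For illustration only, a minimal sketch of the before/after pattern; it
is not code from this patch. The demo_* identifiers are invented, only
the locallock API (linux/locallock.h, DEFINE_LOCAL_IRQ_LOCK(),
get_locked_var(), put_locked_var()) is the one used in random.c below:

/*
 * Sketch only -- demo_* names are made up. It mirrors the shape of
 * get_random_u64()/get_random_u32() as changed by this patch.
 */
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/locallock.h>	/* RT patch set: DEFINE_LOCAL_IRQ_LOCK() etc. */

struct demo_batch {
	u64 sample[16];
	unsigned int position;
};

static DEFINE_PER_CPU(struct demo_batch, demo_batch);
static DEFINE_LOCAL_IRQ_LOCK(demo_batch_lock);
static DEFINE_SPINLOCK(demo_pool_lock);	/* spinlock_t: sleeps on RT */

/* Refill a batch from some shared pool; takes a spinlock_t. */
static void demo_refill(struct demo_batch *b)
{
	spin_lock(&demo_pool_lock);
	/* ... fill b->sample[] from the pool ... */
	b->position = 0;
	spin_unlock(&demo_pool_lock);
}

u64 demo_get_sample(void)
{
	struct demo_batch *b;
	u64 ret;

	/*
	 * Before:  b = &get_cpu_var(demo_batch);
	 * get_cpu_var() disables preemption, so the sleeping spinlock in
	 * demo_refill() triggers a "sleeping function called from invalid
	 * context" splat on RT.
	 *
	 * After: the local lock still serializes access to this CPU's
	 * batch, but it is itself a sleeping lock on RT, so the section
	 * stays preemptible and demo_refill() may block.
	 */
	b = &get_locked_var(demo_batch_lock, demo_batch);
	if (b->position % ARRAY_SIZE(b->sample) == 0)
		demo_refill(b);
	ret = b->sample[b->position++];
	put_locked_var(demo_batch_lock, demo_batch);	/* was put_cpu_var() */

	return ret;
}

With the RT patch set's locallock.h, the !RT definitions of
get_locked_var()/put_locked_var() fall back to get_cpu_var()/
put_cpu_var(), so behaviour without PREEMPT_RT is unchanged.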

Cc: stable-rt@vger.kernel.org # where it applies to
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

diff --git a/drivers/char/random.c b/drivers/char/random.c
index d7f44305e33e..f0ed557c9d26 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -265,6 +265,7 @@
 #include <linux/syscalls.h>
 #include <linux/completion.h>
 #include <linux/uuid.h>
+#include <linux/locallock.h>
 #include <crypto/chacha20.h>
 
 #include <asm/processor.h>
@@ -2055,6 +2056,7 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
  * at any point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock);
 u64 get_random_u64(void)
 {
 	u64 ret;
@@ -2071,7 +2073,7 @@ u64 get_random_u64(void)
 	return ret;
 #endif
 
-	batch = &get_cpu_var(batched_entropy_u64);
+	batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
@@ -2081,12 +2083,13 @@ u64 get_random_u64(void)
 	ret = batch->entropy_u64[batch->position++];
 	if (use_lock)
 		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u64);
+	put_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u32_lock);
 u32 get_random_u32(void)
 {
 	u32 ret;
@@ -2097,7 +2100,7 @@ u32 get_random_u32(void)
 	if (arch_get_random_int(&ret))
 		return ret;
 
-	batch = &get_cpu_var(batched_entropy_u32);
+	batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
@@ -2107,7 +2110,7 @@ u32 get_random_u32(void)
 	ret = batch->entropy_u32[batch->position++];
 	if (use_lock)
 		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u32);
+	put_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
-- 
2.15.0