linuxkit/kernel/patches-4.14.x-rt/0361-random-avoid-preempt_disable-ed-section.patch

From 9926c096cded9f549edd991ee41f3f2d26c71691 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 12 May 2017 15:46:17 +0200
Subject: [PATCH 361/450] random: avoid preempt_disable()ed section

extract_crng() will use sleeping locks while in a preempt_disable()
section due to get_cpu_var().
Work around it with local_locks.

Cc: stable-rt@vger.kernel.org # where it applies to
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/char/random.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 29a46c372df3..9fb3c929765c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -265,6 +265,7 @@
 #include <linux/syscalls.h>
 #include <linux/completion.h>
 #include <linux/uuid.h>
+#include <linux/locallock.h>
 #include <crypto/chacha20.h>
 
 #include <asm/processor.h>
@@ -2196,6 +2197,7 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
  * at any point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock);
 u64 get_random_u64(void)
 {
 	u64 ret;
@@ -2216,7 +2218,7 @@ u64 get_random_u64(void)
 	warn_unseeded_randomness(&previous);
 
 	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u64);
+	batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
@@ -2226,12 +2228,13 @@ u64 get_random_u64(void)
 	ret = batch->entropy_u64[batch->position++];
 	if (use_lock)
 		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u64);
+	put_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u32_lock);
 u32 get_random_u32(void)
 {
 	u32 ret;
@@ -2246,7 +2249,7 @@ u32 get_random_u32(void)
 	warn_unseeded_randomness(&previous);
 
 	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u32);
+	batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
@@ -2256,7 +2259,7 @@ u32 get_random_u32(void)
 	ret = batch->entropy_u32[batch->position++];
 	if (use_lock)
 		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u32);
+	put_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
-- 
2.19.2
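
Note on the pattern (not part of the patch itself): get_cpu_var() expands to
preempt_disable() plus a per-CPU pointer dereference, but on PREEMPT_RT_FULL
the refill path through extract_crng() takes spinlocks, which -rt turns into
sleeping locks; sleeping with preemption disabled is illegal. The patch swaps
in the -rt locallock.h accessors, which serialize access to the per-CPU batch
with a local_lock that may sleep on -rt and, on non-rt builds, fall back to
the plain get_cpu_var()/put_cpu_var() behaviour. Below is a minimal,
self-contained C sketch of that pattern. It is illustrative only: the struct
is cut down, all sketch_* names are hypothetical, and sketch_refill() stands
in for the real extract_crng().

/*
 * Sketch of the accessor swap this patch performs, simplified from
 * drivers/char/random.c. Assumes the -rt tree's <linux/locallock.h>.
 */
#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/percpu.h>	/* DEFINE_PER_CPU */
#include <linux/string.h>	/* memset */
#include <linux/types.h>	/* u64, size_t */
#include <linux/locallock.h>	/* -rt only: DEFINE_LOCAL_IRQ_LOCK & friends */

struct sketch_batched_entropy {
	u64 entropy_u64[8];	/* sized down for illustration */
	unsigned int position;
};

static DEFINE_PER_CPU(struct sketch_batched_entropy, sketch_batch_u64);
static DEFINE_LOCAL_IRQ_LOCK(sketch_batch_u64_lock);

/* Hypothetical stand-in for extract_crng(); the real one can acquire
 * spinlocks, which are sleeping locks on PREEMPT_RT_FULL. */
static void sketch_refill(u64 *out, size_t n)
{
	memset(out, 0, n * sizeof(*out));
}

u64 sketch_get_random_u64(void)
{
	struct sketch_batched_entropy *batch;
	u64 ret;

	/*
	 * Before the patch: batch = &get_cpu_var(...) disabled
	 * preemption, so the refill below could not legally block.
	 *
	 * After the patch: get_locked_var() takes the local_lock
	 * instead. On -rt that lock may sleep, so the refill is fine;
	 * on non-rt it maps back to get_cpu_var(), unchanged.
	 */
	batch = &get_locked_var(sketch_batch_u64_lock, sketch_batch_u64);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		sketch_refill(batch->entropy_u64,
			      ARRAY_SIZE(batch->entropy_u64));
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	put_locked_var(sketch_batch_u64_lock, sketch_batch_u64);
	return ret;
}

The critical section itself is untouched; the whole patch is a mechanical
swap of the per-CPU accessors plus the two DEFINE_LOCAL_IRQ_LOCK()
definitions, one per batched-entropy pool.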