aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSasha Levin2014-03-28 11:38:42 -0500
committerDavid S. Miller2014-03-28 15:04:19 -0500
commit05efa8c943b1d5d90fa8c8147571837573338bb6 (patch)
tree7882312770611805d896178894810f0d9467469b
parent335a67d2ad481b03607bf30a38c28178fa1ad61a (diff)
downloadkernel-common-05efa8c943b1d5d90fa8c8147571837573338bb6.tar.gz
kernel-common-05efa8c943b1d5d90fa8c8147571837573338bb6.tar.xz
kernel-common-05efa8c943b1d5d90fa8c8147571837573338bb6.zip
random32: avoid attempt to late reseed if in the middle of seeding
Commit 4af712e8df ("random32: add prandom_reseed_late() and call when nonblocking pool becomes initialized") has added a late reseed stage that happens as soon as the nonblocking pool is marked as initialized. This fails in the case that the nonblocking pool gets initialized during __prandom_reseed()'s call to get_random_bytes(). In that case we'd double back into __prandom_reseed() in an attempt to do a late reseed - deadlocking on 'lock' early on in the boot process. Instead, just avoid even waiting to do a reseed if a reseed is already occurring. Fixes: 4af712e8df99 ("random32: add prandom_reseed_late() and call when nonblocking pool becomes initialized") Signed-off-by: Sasha Levin <sasha.levin@oracle.com> Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: Daniel Borkmann <dborkman@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--lib/random32.c13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/lib/random32.c b/lib/random32.c
index 1e5b2df4429..61489677870 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -244,8 +244,19 @@ static void __prandom_reseed(bool late)
244 static bool latch = false; 244 static bool latch = false;
245 static DEFINE_SPINLOCK(lock); 245 static DEFINE_SPINLOCK(lock);
246 246
247 /* Asking for random bytes might result in bytes getting
248 * moved into the nonblocking pool and thus marking it
249 * as initialized. In this case we would double back into
250 * this function and attempt to do a late reseed.
251 * Ignore the pointless attempt to reseed again if we're
252 * already waiting for bytes when the nonblocking pool
253 * got initialized.
254 */
255
247 /* only allow initial seeding (late == false) once */ 256 /* only allow initial seeding (late == false) once */
248 spin_lock_irqsave(&lock, flags); 257 if (!spin_trylock_irqsave(&lock, flags))
258 return;
259
249 if (latch && !late) 260 if (latch && !late)
250 goto out; 261 goto out;
251 latch = true; 262 latch = true;