random: set up the NUMA crng instances after the CRNG is fully initialized
Author:     Theodore Ts'o <tytso@mit.edu>
AuthorDate: Wed, 11 Apr 2018 19:23:56 +0000 (15:23 -0400)
Commit:     Theodore Ts'o <tytso@mit.edu>
CommitDate: Sat, 14 Apr 2018 15:59:19 +0000 (11:59 -0400)
Until the primary_crng is fully initialized, don't initialize the NUMA
crng nodes.  Otherwise users of /dev/urandom on NUMA systems before
the CRNG is fully initialized can get very bad quality randomness.  Of
course everyone should move to getrandom(2) where this won't be an
issue, but there's a lot of legacy code out there.  This is related
to CVE-2018-1108.

Reported-by: Jann Horn <jannh@google.com>
Fixes: 1e7f583af67b ("random: make /dev/urandom scalable for silly...")
Cc: stable@kernel.org # 4.8+
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
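
As the message above notes, getrandom(2) sidesteps the problem entirely,
because by default it blocks until the kernel's CRNG has been fully
initialized.  A minimal user-space sketch (not part of this commit;
assumes glibc 2.25+ for <sys/random.h>) of what replacing a read of
/dev/urandom looks like:

	/* getrandom(2) with flags == 0 reads from the urandom source,
	 * but blocks until the CRNG is fully initialized, so it cannot
	 * return the poor-quality early-boot output this patch is
	 * concerned with. */
	#include <sys/random.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned char key[32];

		/* For requests of at most 256 bytes, getrandom() does
		 * not return a short read once it succeeds at all. */
		if (getrandom(key, sizeof(key), 0) < 0) {
			perror("getrandom");
			return 1;
		}
		/* key[] now holds 32 bytes fit for cryptographic use. */
		return 0;
	}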
drivers/char/random.c

index 6baa828c0493d5c30da47a1c47fe1d5d988e9b27..02d792f7933febeaec7f90b8324b021045f83802 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -787,6 +787,32 @@ static void crng_initialize(struct crng_state *crng)
        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
 
+#ifdef CONFIG_NUMA
+static void numa_crng_init(void)
+{
+       int i;
+       struct crng_state *crng;
+       struct crng_state **pool;
+
+       pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+       for_each_online_node(i) {
+               crng = kmalloc_node(sizeof(struct crng_state),
+                                   GFP_KERNEL | __GFP_NOFAIL, i);
+               spin_lock_init(&crng->lock);
+               crng_initialize(crng);
+               pool[i] = crng;
+       }
+       mb();
+       if (cmpxchg(&crng_node_pool, NULL, pool)) {
+               for_each_node(i)
+                       kfree(pool[i]);
+               kfree(pool);
+       }
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
 /*
  * crng_fast_load() can be called by code in the interrupt service
  * path.  So we can't afford to dilly-dally.
@@ -893,6 +919,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
        spin_unlock_irqrestore(&primary_crng.lock, flags);
        if (crng == &primary_crng && crng_init < 2) {
                invalidate_batched_entropy();
+               numa_crng_init();
                crng_init = 2;
                process_random_ready_list();
                wake_up_interruptible(&crng_init_wait);
@@ -1727,28 +1754,9 @@ static void init_std_data(struct entropy_store *r)
  */
 static int rand_initialize(void)
 {
-#ifdef CONFIG_NUMA
-       int i;
-       struct crng_state *crng;
-       struct crng_state **pool;
-#endif
-
        init_std_data(&input_pool);
        init_std_data(&blocking_pool);
        crng_initialize(&primary_crng);
-
-#ifdef CONFIG_NUMA
-       pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
-       for_each_online_node(i) {
-               crng = kmalloc_node(sizeof(struct crng_state),
-                                   GFP_KERNEL | __GFP_NOFAIL, i);
-               spin_lock_init(&crng->lock);
-               crng_initialize(crng);
-               pool[i] = crng;
-       }
-       mb();
-       crng_node_pool = pool;
-#endif
        return 0;
 }
 early_initcall(rand_initialize);
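
One detail worth noting in the patch above: numa_crng_init() is now
called when the CRNG reaches full initialization rather than from a
single early_initcall(), so it no longer assumes it runs exactly once.
The pool is built privately and published with cmpxchg(), with the
mb() ensuring the per-node crng state is visible before the pool
pointer is; a caller that loses the race simply frees its copy.  A
minimal user-space sketch of the same publish-once idiom in C11
(hypothetical names, not kernel code):

	/* Build the object privately, then install it with a single
	 * compare-and-swap; the losing racer frees its copy instead of
	 * leaking it or publishing twice. */
	#include <stdatomic.h>
	#include <stdlib.h>

	struct pool { int ready; };

	static _Atomic(struct pool *) global_pool;

	static void publish_pool(void)
	{
		struct pool *p = calloc(1, sizeof(*p));
		struct pool *expected = NULL;

		if (!p)
			return;
		p->ready = 1;	/* fully initialize before publishing */

		/* The ordering of a successful CAS plays the role of
		 * the kernel's mb() here: any reader that observes the
		 * pointer also observes the initialized contents. */
		if (!atomic_compare_exchange_strong(&global_pool, &expected, p))
			free(p);	/* lost the race; discard our copy */
	}

In the kernel version the losing caller also walks for_each_node() to
free the per-node crng_state allocations, since they are reachable
only through the discarded pool.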