asedeno.scripts.mit.edu Git - linux.git/commitdiff
slab: Replace synchronize_sched() with synchronize_rcu()
author: Paul E. McKenney <paulmck@linux.ibm.com>
Wed, 7 Nov 2018 03:24:33 +0000 (19:24 -0800)
committer: Paul E. McKenney <paulmck@linux.ibm.com>
Tue, 27 Nov 2018 17:21:45 +0000 (09:21 -0800)
Now that synchronize_rcu() waits for preempt-disable regions of code
as well as RCU read-side critical sections, synchronize_sched() can be
replaced by synchronize_rcu().  This commit therefore makes this change.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: <linux-mm@kvack.org>
mm/slab.c
mm/slab_common.c

index 2a5654bb3b3ff3fc66e7c0ebca8a583cec61d3f1..3abb9feb3818e5459f4ce658469e835582025b10 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -962,10 +962,10 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
         * To protect lockless access to n->shared during irq disabled context.
         * If n->shared isn't NULL in irq disabled context, accessing to it is
         * guaranteed to be valid until irq is re-enabled, because it will be
-        * freed after synchronize_sched().
+        * freed after synchronize_rcu().
         */
        if (old_shared && force_change)
-               synchronize_sched();
+               synchronize_rcu();
 
 fail:
        kfree(old_shared);
index 7eb8dc136c1cb879b74b4102e201015a85867ee6..9c11e8a937d2e419b0b89532bfe75d80a25f7fe2 100644 (file)
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -724,7 +724,7 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
        css_get(&s->memcg_params.memcg->css);
 
        s->memcg_params.deact_fn = deact_fn;
-       call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+       call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
 }
 
 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -839,11 +839,11 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
        mutex_unlock(&slab_mutex);
 
        /*
-        * SLUB deactivates the kmem_caches through call_rcu_sched. Make
+        * SLUB deactivates the kmem_caches through call_rcu. Make
         * sure all registered rcu callbacks have been invoked.
         */
        if (IS_ENABLED(CONFIG_SLUB))
-               rcu_barrier_sched();
+               rcu_barrier();
 
        /*
         * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB