percpu: READ_ONCE() now implies smp_read_barrier_depends()
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Mon, 9 Oct 2017 17:20:44 +0000 (10:20 -0700)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Mon, 4 Dec 2017 18:52:53 +0000 (10:52 -0800)
Because READ_ONCE() now implies smp_read_barrier_depends(), this commit
removes the now-redundant smp_read_barrier_depends() following the
READ_ONCE() in __ref_is_percpu().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
include/linux/percpu-refcount.h
lib/percpu-refcount.c
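
To illustrate the pattern the commit message describes, here is a minimal,
hypothetical sketch (the struct, variable, and function names below are
invented for illustration, not taken from the tree): a writer publishes a
pointer with smp_store_release(), and a reader fetches it with READ_ONCE().
The dependent dereference on the reader side is now ordered by READ_ONCE()
itself, so an explicit smp_read_barrier_depends() after such a fetch is
redundant.

#include <linux/compiler.h>	/* READ_ONCE() */
#include <asm/barrier.h>	/* smp_store_release() */

struct foo {
	int data;
};

static struct foo *foo_ptr;

/* Writer: initialize the object, then publish the pointer. */
static void foo_publish(struct foo *p)
{
	p->data = 42;
	smp_store_release(&foo_ptr, p);	/* init ordered before publication */
}

/* Reader: the load of p->data depends on the fetched pointer value. */
static int foo_consume(void)
{
	struct foo *p = READ_ONCE(foo_ptr);	/* implies smp_read_barrier_depends() */

	return p ? p->data : -1;	/* dependent load, ordered after the fetch */
}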

diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 6658d9ee525720f2d80557380045291bc71d075a..864d167a10739e464ec534ad3b32a96757e316ac 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -139,12 +139,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
         * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
         * between contaminating the pointer value, meaning that
         * READ_ONCE() is required when fetching it.
+        *
+        * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+        * with smp_store_release() in __percpu_ref_switch_to_percpu().
         */
        percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
 
-       /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
-       smp_read_barrier_depends();
-
        /*
         * Theoretically, the following could test just ATOMIC; however,
         * then we'd have to mask off DEAD separately as DEAD may be
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe03c6d527611937e196395f83f7348da86f8f06..30e7dd88148b0282c1f445047b360156e79278a0 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -197,10 +197,10 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
        atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
 
        /*
-        * Restore per-cpu operation.  smp_store_release() is paired with
-        * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
-        * that the zeroing is visible to all percpu accesses which can see
-        * the following __PERCPU_REF_ATOMIC clearing.
+        * Restore per-cpu operation.  smp_store_release() is paired
+        * with READ_ONCE() in __ref_is_percpu() and guarantees that the
+        * zeroing is visible to all percpu accesses which can see the
+        * following __PERCPU_REF_ATOMIC clearing.
         */
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(percpu_count, cpu) = 0;
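
Condensing the two hunks above, the pairing after this commit looks roughly
like this (a simplified sketch, not verbatim from the tree; declarations and
error paths omitted):

/* Writer, in __percpu_ref_switch_to_percpu(): zero the per-cpu
 * counters, then clear __PERCPU_REF_ATOMIC with a release store. */
for_each_possible_cpu(cpu)
	*per_cpu_ptr(percpu_count, cpu) = 0;
smp_store_release(&ref->percpu_count_ptr,
		  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);

/* Reader, in __ref_is_percpu(): fetch the pointer-plus-flags word
 * once; the address dependency carried by READ_ONCE() orders the
 * later per-cpu accesses through the pointer after the zeroing. */
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
if (!(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD)) {
	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}
return false;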