asedeno.scripts.mit.edu Git - linux.git/commitdiff
rcu: Abstract the dynticks snapshot operation
author Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Wed, 2 Nov 2016 21:12:05 +0000 (14:12 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 16 Jan 2017 23:47:53 +0000 (15:47 -0800)
This commit is the second step towards full abstraction of all accesses to
the ->dynticks counter, implementing the previously open-coded atomic
add of zero in a new rcu_dynticks_snap() function.  This abstraction will
ease changes to the ->dynticks counter operation.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
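
For illustration, the snapshot-and-recheck idiom that rcu_dynticks_snap()
serves can be sketched in user space with C11 atomics standing in for the
kernel's atomic_t (a minimal sketch, not part of the commit; all names
below are hypothetical):

    /* Illustration only: user-space sketch of the dynticks
     * snapshot-and-recheck idiom.  Even counter values mean the CPU
     * is idle; odd values mean it is "watching". */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int dynticks;

    /* Adding zero returns the current counter value while providing
     * full ordering, mirroring atomic_add_return(0, &rdtp->dynticks). */
    static int dynticks_snap(void)
    {
            return atomic_fetch_add(&dynticks, 0);
    }

    /* A quiescent state is evident if the snapshot was even (the CPU
     * was idle at snapshot time) or the counter has since moved (the
     * CPU passed through idle in the meantime). */
    static bool quiescent_since(int snap)
    {
            return !(snap & 0x1) || dynticks_snap() != snap;
    }

    int main(void)
    {
            atomic_store(&dynticks, 1);             /* CPU busy ("watching") */
            int snap = dynticks_snap();
            printf("%d\n", quiescent_since(snap));  /* 0: no change, still busy */
            atomic_fetch_add(&dynticks, 1);         /* CPU goes idle */
            printf("%d\n", quiescent_since(snap));  /* 1: counter moved */
            return 0;
    }

This is the same comparison pattern visible below in
rcu_implicit_dynticks_qs() and in the expedited grace-period retry loop.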
kernel/rcu/tree.c
kernel/rcu/tree_exp.h

index 14e283c351f6be238d4d3504304f75980f6421c9..805d55ee0b2a848a6dc3e1fb84a9283824a3aee9 100644 (file)
@@ -281,6 +281,17 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 };
 
+/*
+ * Snapshot the ->dynticks counter with full ordering so as to allow
+ * stable comparison of this counter with past and future snapshots.
+ */
+static int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
+{
+       int snap = atomic_add_return(0, &rdtp->dynticks);
+
+       return snap;
+}
+
 /*
  * Do a double-increment of the ->dynticks counter to emulate a
  * momentary idle-CPU quiescent state.
@@ -1049,7 +1060,9 @@ void rcu_nmi_exit(void)
  */
 bool notrace __rcu_is_watching(void)
 {
-       return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
+       return atomic_read(&rdtp->dynticks) & 0x1;
 }
 
 /**
@@ -1132,7 +1145,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 static int dyntick_save_progress_counter(struct rcu_data *rdp,
                                         bool *isidle, unsigned long *maxj)
 {
-       rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+       rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
        rcu_sysidle_check_cpu(rdp, isidle, maxj);
        if ((rdp->dynticks_snap & 0x1) == 0) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
@@ -1157,7 +1170,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
        int *rcrmp;
        unsigned int snap;
 
-       curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
+       curr = (unsigned int)rcu_dynticks_snap(rdp->dynticks);
        snap = (unsigned int)rdp->dynticks_snap;
 
        /*
index e59e1849b89aca14797999deb3e9e91bdd9b78c2..011f626b2fd83d63f1c3065ce36d2189d2e86dc1 100644 (file)
@@ -356,10 +356,9 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                mask_ofl_test = 0;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
                        rdp->exp_dynticks_snap =
-                               atomic_add_return(0, &rdtp->dynticks);
+                               rcu_dynticks_snap(rdp->dynticks);
                        if (raw_smp_processor_id() == cpu ||
                            !(rdp->exp_dynticks_snap & 0x1) ||
                            !(rnp->qsmaskinitnext & rdp->grpmask))
@@ -380,12 +379,11 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
                        if (!(mask_ofl_ipi & mask))
                                continue;
 retry_ipi:
-                       if (atomic_add_return(0, &rdtp->dynticks) !=
+                       if (rcu_dynticks_snap(rdp->dynticks) !=
                            rdp->exp_dynticks_snap) {
                                mask_ofl_test |= mask;
                                continue;