asedeno.scripts.mit.edu Git - linux.git/commitdiff
rcu: Remove obsolete __rcu_pending() statistics for debugfs
author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Wed, 10 Jan 2018 20:36:00 +0000 (12:36 -0800)
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Wed, 21 Feb 2018 00:10:28 +0000 (16:10 -0800)
The debugfs interface displayed statistics on RCU-pending checks,
but this interface has since been removed.  This commit therefore
removes the no-longer-used rcu_data structure's ->n_rcu_pending,
->n_rp_core_needs_qs, ->n_rp_report_qs, ->n_rp_cb_ready,
->n_rp_cpu_needs_gp, ->n_rp_gp_completed, ->n_rp_gp_started,
->n_rp_nocb_defer_wakeup, and ->n_rp_need_nothing fields along with
their updates.

If this information proves necessary in the future, the corresponding
event traces will be added.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree.c
kernel/rcu/tree.h

index 8e0711954bbff0bf2d17b10d1894e5b6caff4e6a..99d59be761d1ebea22c661a6212595235cdedb7d 100644 (file)
@@ -3354,8 +3354,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 {
        struct rcu_node *rnp = rdp->mynode;
 
-       rdp->n_rcu_pending++;
-
        /* Check for CPU stalls, if enabled. */
        check_cpu_stall(rsp, rdp);
 
@@ -3364,48 +3362,31 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
                return 0;
 
        /* Is the RCU core waiting for a quiescent state from this CPU? */
-       if (rcu_scheduler_fully_active &&
-           rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
-           rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) {
-               rdp->n_rp_core_needs_qs++;
-       } else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
-               rdp->n_rp_report_qs++;
+       if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm)
                return 1;
-       }
 
        /* Does this CPU have callbacks ready to invoke? */
-       if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
-               rdp->n_rp_cb_ready++;
+       if (rcu_segcblist_ready_cbs(&rdp->cblist))
                return 1;
-       }
 
        /* Has RCU gone idle with this CPU needing another grace period? */
-       if (cpu_needs_another_gp(rsp, rdp)) {
-               rdp->n_rp_cpu_needs_gp++;
+       if (cpu_needs_another_gp(rsp, rdp))
                return 1;
-       }
 
        /* Has another RCU grace period completed?  */
-       if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
-               rdp->n_rp_gp_completed++;
+       if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */
                return 1;
-       }
 
        /* Has a new RCU grace period started? */
        if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
-           unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
-               rdp->n_rp_gp_started++;
+           unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
                return 1;
-       }
 
        /* Does this CPU need a deferred NOCB wakeup? */
-       if (rcu_nocb_need_deferred_wakeup(rdp)) {
-               rdp->n_rp_nocb_defer_wakeup++;
+       if (rcu_nocb_need_deferred_wakeup(rdp))
                return 1;
-       }
 
        /* nothing to do */
-       rdp->n_rp_need_nothing++;
        return 0;
 }
 
index b258fac7352434890f9702d0045ca95f01fec3aa..d29bab8dea28a2696c11e413f6a741d8b1fe1ec9 100644 (file)
@@ -226,18 +226,7 @@ struct rcu_data {
                                        /* Grace period that needs help */
                                        /*  from cond_resched(). */
 
-       /* 5) __rcu_pending() statistics. */
-       unsigned long n_rcu_pending;    /* rcu_pending() calls since boot. */
-       unsigned long n_rp_core_needs_qs;
-       unsigned long n_rp_report_qs;
-       unsigned long n_rp_cb_ready;
-       unsigned long n_rp_cpu_needs_gp;
-       unsigned long n_rp_gp_completed;
-       unsigned long n_rp_gp_started;
-       unsigned long n_rp_nocb_defer_wakeup;
-       unsigned long n_rp_need_nothing;
-
-       /* 6) _rcu_barrier(), OOM callbacks, and expediting. */
+       /* 5) _rcu_barrier(), OOM callbacks, and expediting. */
        struct rcu_head barrier_head;
 #ifdef CONFIG_RCU_FAST_NO_HZ
        struct rcu_head oom_head;
@@ -248,7 +237,7 @@ struct rcu_data {
        atomic_long_t exp_workdone3;    /* # done by others #3. */
        int exp_dynticks_snap;          /* Double-check need for IPI. */
 
-       /* 7) Callback offloading. */
+       /* 6) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
        struct rcu_head *nocb_head;     /* CBs waiting for kthread. */
        struct rcu_head **nocb_tail;
@@ -275,7 +264,7 @@ struct rcu_data {
                                        /* Leader CPU takes GP-end wakeups. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-       /* 8) RCU CPU stall data. */
+       /* 7) RCU CPU stall data. */
        unsigned int softirq_snap;      /* Snapshot of softirq activity. */
        /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
        struct irq_work rcu_iw;         /* Check for non-irq activity. */