rcu: Speed up expedited GPs when interrupting RCU reader
Author:     Paul E. McKenney <paulmck@linux.ibm.com>
AuthorDate: Tue, 16 Oct 2018 11:12:58 +0000 (04:12 -0700)
Commit:     Paul E. McKenney <paulmck@linux.ibm.com>
CommitDate: Mon, 12 Nov 2018 17:03:59 +0000 (09:03 -0800)
In PREEMPT kernels, an expedited grace period might send an IPI to a
CPU that is executing an RCU read-side critical section.  In that case,
it would be nice if the rcu_read_unlock() directly interacted with the
RCU core code to immediately report the quiescent state.  And this does
happen in the case where the reader has been preempted.  But it would
also be a nice performance optimization if immediate reporting also
happened in the preemption-free case.

This commit therefore adds an ->exp_hint field to the task_struct structure's
->rcu_read_unlock_special field.  The IPI handler sets this hint when
it has interrupted an RCU read-side critical section, and this causes
the outermost rcu_read_unlock() call to invoke rcu_read_unlock_special(),
which, if preemption is enabled, reports the quiescent state immediately.
If preemption is disabled, then the report is required to be deferred
until preemption (or bottom halves or interrupts or whatever) is re-enabled.
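
For illustration, the unlock-side check looks roughly like this (a
simplified sketch, not the verbatim __rcu_read_unlock(); the nesting
bookkeeping is elided):

	/* Outermost rcu_read_unlock(), simplified: reading the whole
	 * ->s word also observes the new ->exp_hint byte, because the
	 * rcu_special union overlays the byte flags on that word. */
	if (unlikely(READ_ONCE(current->rcu_read_unlock_special.s)))
		rcu_read_unlock_special(current);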

Because this is a hint, it does nothing for more complicated cases.  For
example, if the IPI interrupts an RCU reader, interrupts are then disabled
across the rcu_read_unlock(), and another rcu_read_lock() is executed
before interrupts are re-enabled, the hint will already have been cleared.
If you do crazy things like this, reporting will be deferred until some
later RCU_SOFTIRQ handler, context switch, cond_resched(), or similar.
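
A hypothetical timeline for that corner case, sketched purely for
illustration (the IPI lands while irqs are still enabled):

	rcu_read_lock();	/* reader begins */
				/* expedited IPI arrives, sets ->exp_hint */
	local_irq_disable();
	rcu_read_unlock();	/* irqs off: hint cleared, QS merely deferred */
	rcu_read_lock();	/* new reader starts before irqs return */
	local_irq_enable();
	rcu_read_unlock();	/* hint already gone; QS reported later via
				 * softirq, context switch, or similar */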

Reported-by: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Acked-by: Joel Fernandes (Google) <joel@joelfernandes.org>
include/linux/sched.h
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a51c13c2b1a0316b00f928bc9e5afaa2ec949d83..e4c7b6241088ccd811b60a4d1818fbfc2658a900 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -572,8 +572,10 @@ union rcu_special {
        struct {
                u8                      blocked;
                u8                      need_qs;
+               u8                      exp_hint; /* Hint for performance. */
+               u8                      pad; /* No garbage from compiler! */
        } b; /* Bits. */
-       u16 s; /* Set of bits. */
+       u32 s; /* Set of bits. */
 };
 
 enum perf_event_task_context {
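
The explicit pad byte matters because ->s is read and written as a
single word: without it, the fourth byte of that word would be
compiler-controlled padding. An illustrative (not in-tree) sketch of
the size invariant and the two aliasing views, assuming the kernel's
BUILD_BUG_ON():

	BUILD_BUG_ON(sizeof(union rcu_special) != sizeof(u32));

	union rcu_special rs = { .s = 0 };	/* clears all four bytes at once */
	WRITE_ONCE(rs.b.exp_hint, true);	/* sets one flag via the byte view */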
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index e669ccf3751b1bd5c6abbe411ad48ed315bf2522..928fe5893a57d5e88498beecd035080c993cc6af 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -692,8 +692,10 @@ static void sync_rcu_exp_handler(void *unused)
         */
        if (t->rcu_read_lock_nesting > 0) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
-               if (rnp->expmask & rdp->grpmask)
+               if (rnp->expmask & rdp->grpmask) {
                        rdp->deferred_qs = true;
+                       WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
+               }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 05915e53633657e8e66850a7527abac87300673c..618956cc7a55f3a9724a6cbcc5e0f5b90504dea6 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -642,13 +642,21 @@ static void rcu_read_unlock_special(struct task_struct *t)
 
        local_irq_save(flags);
        irqs_were_disabled = irqs_disabled_flags(flags);
-       if ((preempt_bh_were_disabled || irqs_were_disabled) &&
-           t->rcu_read_unlock_special.b.blocked) {
+       if (preempt_bh_were_disabled || irqs_were_disabled) {
+               WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
                /* Need to defer quiescent state until everything is enabled. */
-               raise_softirq_irqoff(RCU_SOFTIRQ);
+               if (irqs_were_disabled) {
+                       /* Enabling irqs does not reschedule, so... */
+                       raise_softirq_irqoff(RCU_SOFTIRQ);
+               } else {
+                       /* Enabling BH or preempt does reschedule, so... */
+                       set_tsk_need_resched(current);
+                       set_preempt_need_resched();
+               }
                local_irq_restore(flags);
                return;
        }
+       WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
        rcu_preempt_deferred_qs_irqrestore(t, flags);
 }