rcu: Use irq_work to get scheduler's attention in clean context
author		Paul E. McKenney <paulmck@linux.ibm.com>
		Thu, 4 Apr 2019 19:19:25 +0000 (12:19 -0700)
committer	Paul E. McKenney <paulmck@linux.ibm.com>
		Sat, 25 May 2019 21:50:49 +0000 (14:50 -0700)
When rcu_read_unlock_special() is invoked with interrupts disabled, is
either not within an interrupt handler or is not using RCU_SOFTIRQ, is
not ending the first RCU read-side critical section in the chain, and
either an expedited grace period is in flight or this is a NO_HZ_FULL
kernel, the end of the grace period can be unduly delayed.  The reason
is that it is not safe to do wakeups in this situation, so the report
of the quiescent state must be deferred.
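
For concreteness, here is a hedged sketch of one such situation (a
hypothetical caller, not taken from this patch): the outermost
rcu_read_unlock() runs with interrupts disabled, so the unlock path
cannot safely invoke the scheduler's wakeup machinery.

	unsigned long flags;

	rcu_read_lock();
	local_irq_save(flags);
	/* ... read-side critical-section work ... */
	rcu_read_unlock();	/* irqs off: wakeups unsafe here */
	local_irq_restore(flags);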

This commit fixes this problem by using the irq_work subsystem to
force a later interrupt handler in a clean environment.  Because
set_tsk_need_resched(current) and set_preempt_need_resched() are
invoked prior to this, the scheduler will force a context switch
upon return from this interrupt (though perhaps at the end of any
interrupted preempt-disable or BH-disable region of code), which will
invoke rcu_note_context_switch() (again in a clean environment), which
will in turn give RCU the chance to report the deferred quiescent state.
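
As background, the following self-contained sketch shows the generic
irq_work pattern the commit relies on: queue a callback that runs from
a self-IPI in hard-interrupt context.  This is a hypothetical demo
module, not part of the patch; names such as demo_iw are invented, and
it assumes a kernel built with CONFIG_IRQ_WORK=y.

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/irq_work.h>
	#include <linux/smp.h>

	static struct irq_work demo_iw;

	/* Runs in hard-interrupt context shortly after being queued. */
	static void demo_iw_handler(struct irq_work *iwp)
	{
		pr_info("irq_work ran on CPU %d\n", smp_processor_id());
	}

	static int __init demo_init(void)
	{
		init_irq_work(&demo_iw, demo_iw_handler);
		irq_work_queue(&demo_iw);	/* current CPU; irq_work_queue_on() targets a specific CPU */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		irq_work_sync(&demo_iw);	/* wait out any pending invocation */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");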

Of course, by then this task might be within another RCU read-side
critical section.  But that will be detected at that time and reporting
will be further deferred to the outermost rcu_read_unlock().  See
rcu_preempt_need_deferred_qs() and rcu_preempt_deferred_qs() for more
details on the checking.
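
For illustration (hypothetical snippet, not from the patch), the
nesting rule means only the outermost unlock can report the deferred
quiescent state:

	rcu_read_lock();
	rcu_read_lock();	/* nested reader */
	/* ... */
	rcu_read_unlock();	/* inner: nesting still nonzero, nothing reported */
	rcu_read_unlock();	/* outermost: deferred quiescent state reportable */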

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index a1a72a1ecb026baf16d335140b3c29718a608d27..21d740f0b8dc8a99698611276e3ee898aebdf57c 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -161,6 +161,8 @@ struct rcu_data {
                                        /*  ticks this CPU has handled */
                                        /*  during and after the last grace */
                                        /* period it is aware of. */
+       struct irq_work defer_qs_iw;    /* Obtain later scheduler attention. */
+       bool defer_qs_iw_pending;       /* Scheduler attention pending? */
 
        /* 2) batch handling */
        struct rcu_segcblist cblist;    /* Segmented callback list, with */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index e1005f5e8094e5f362ae9e27b614fbea9e288a6c..58c7853f19e73e7ac713b1ccf8e7630e5121e617 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -587,6 +587,17 @@ static void rcu_preempt_deferred_qs(struct task_struct *t)
                t->rcu_read_lock_nesting += RCU_NEST_BIAS;
 }
 
+/*
+ * Minimal handler to give the scheduler a chance to re-evaluate.
+ */
+static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
+{
+       struct rcu_data *rdp;
+
+       rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
+       rdp->defer_qs_iw_pending = false;
+}
+
 /*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
@@ -630,6 +641,15 @@ static void rcu_read_unlock_special(struct task_struct *t)
                        // Also if no expediting or NO_HZ_FULL, slow is OK.
                        set_tsk_need_resched(current);
                        set_preempt_need_resched();
+                       if (IS_ENABLED(CONFIG_IRQ_WORK) &&
+                           !rdp->defer_qs_iw_pending && exp) {
+                               // Get scheduler to re-evaluate and call hooks.
+                               // If !IRQ_WORK, FQS scan will eventually IPI.
+                               init_irq_work(&rdp->defer_qs_iw,
+                                             rcu_preempt_deferred_qs_handler);
+                               rdp->defer_qs_iw_pending = true;
+                               irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
+                       }
                }
                t->rcu_read_unlock_special.b.deferred_qs = true;
                local_irq_restore(flags);