rcu: Avoid resched_cpu() when rescheduling the current CPU
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Thu, 26 Jul 2018 20:44:00 +0000 (13:44 -0700)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Thu, 30 Aug 2018 23:03:45 +0000 (16:03 -0700)
The resched_cpu() interface is quite handy, but it does acquire the
specified CPU's runqueue lock, which does not come for free.  This
commit therefore substitutes the following when directing resched_cpu()
at the current CPU:

set_tsk_need_resched(current);
set_preempt_need_resched();

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
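
For context, a minimal sketch of the trade-off this commit exploits. The helper
name rcu_resched_cpu_cheap() is hypothetical and is not part of the commit,
which simply open-codes the two calls at each site where the target is known
to be the current CPU; the cheap path also assumes the caller is pinned to
that CPU (e.g. preemption or interrupts disabled):

    #include <linux/preempt.h>
    #include <linux/sched.h>
    #include <linux/smp.h>

    /*
     * Hypothetical helper: rescheduling the CPU we are already running on
     * only requires setting the need_resched flags, whereas a remote CPU
     * still has to go through resched_cpu() and its runqueue lock.
     */
    static void rcu_resched_cpu_cheap(int cpu)
    {
            if (cpu == smp_processor_id()) {
                    /* Current CPU: mark the running task and the per-CPU
                     * preempt flag; no runqueue lock is taken. */
                    set_tsk_need_resched(current);
                    set_preempt_need_resched();
            } else {
                    /* Remote CPU: resched_cpu() acquires that CPU's runqueue
                     * lock and kicks it if needed. */
                    resched_cpu(cpu);
            }
    }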
kernel/rcu/tree.c
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 96731f62594ab6e6f25354ee0d493388a4a5fdf6..92346ab8077d4d3a359974d313a5407805f395e1 100644
@@ -1354,7 +1354,8 @@ static void print_cpu_stall(void)
         * progress and it could be we're stuck in kernel space without context
         * switches for an entirely unreasonable amount of time.
         */
-       resched_cpu(smp_processor_id());
+       set_tsk_need_resched(current);
+       set_preempt_need_resched();
 }
 
 static void check_cpu_stall(struct rcu_data *rdp)
@@ -2675,10 +2676,12 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
        WARN_ON_ONCE(!rdp->beenonline);
 
        /* Report any deferred quiescent states if preemption enabled. */
-       if (!(preempt_count() & PREEMPT_MASK))
+       if (!(preempt_count() & PREEMPT_MASK)) {
                rcu_preempt_deferred_qs(current);
-       else if (rcu_preempt_need_deferred_qs(current))
-               resched_cpu(rdp->cpu); /* Provoke future context switch. */
+       } else if (rcu_preempt_need_deferred_qs(current)) {
+               set_tsk_need_resched(current);
+               set_preempt_need_resched();
+       }
 
        /* Update RCU state based on any recent quiescent states. */
        rcu_check_quiescent_state(rdp);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 78553a8fa3c625a0d468bd626ef267bda223eb64..030df96e0d3c804efe3a74bad36c5e43f518e40a 100644
@@ -672,7 +672,8 @@ static void sync_rcu_exp_handler(void *unused)
                        rcu_report_exp_rdp(rdp);
                } else {
                        rdp->deferred_qs = true;
-                       resched_cpu(rdp->cpu);
+                       set_tsk_need_resched(t);
+                       set_preempt_need_resched();
                }
                return;
        }
@@ -710,15 +711,16 @@ static void sync_rcu_exp_handler(void *unused)
         * because we are in an interrupt handler, which will cause that
         * function to take an early exit without doing anything.
         *
-        * Otherwise, use resched_cpu() to force a context switch after
-        * the CPU enables everything.
+        * Otherwise, force a context switch after the CPU enables everything.
         */
        rdp->deferred_qs = true;
        if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
-           WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs()))
+           WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
                rcu_preempt_deferred_qs(t);
-       else
-               resched_cpu(rdp->cpu);
+       } else {
+               set_tsk_need_resched(t);
+               set_preempt_need_resched();
+       }
 }
 
 /* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
@@ -779,7 +781,8 @@ static void sync_sched_exp_handler(void *unused)
        __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
        /* Store .exp before .rcu_urgent_qs. */
        smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
-       resched_cpu(smp_processor_id());
+       set_tsk_need_resched(current);
+       set_preempt_need_resched();
 }
 
 /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 1e80a0da792448fa94939cced510d8e157fe3590..978ce353980969cb77a1bbba618e189563126aa6 100644
@@ -791,8 +791,10 @@ static void rcu_flavor_check_callbacks(int user)
        if (t->rcu_read_lock_nesting > 0 ||
            (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
                /* No QS, force context switch if deferred. */
-               if (rcu_preempt_need_deferred_qs(t))
-                       resched_cpu(smp_processor_id());
+               if (rcu_preempt_need_deferred_qs(t)) {
+                       set_tsk_need_resched(t);
+                       set_preempt_need_resched();
+               }
        } else if (rcu_preempt_need_deferred_qs(t)) {
                rcu_preempt_deferred_qs(t); /* Report deferred QS. */
                return;