// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA          (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA          0
#endif

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 3);
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 300);
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
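
/*
 * Worked example (illustration only): with the default
 * CONFIG_RCU_CPU_STALL_TIMEOUT=21 and HZ=1000, this returns
 * 21 * 1000 = 21000 jiffies, plus another 5 * HZ when
 * CONFIG_PROVE_RCU=y to reduce false positives under the heavier
 * debug instrumentation.  A runtime value outside [3, 300] seconds
 * is clamped and written back, so the value typically visible via
 * /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout reflects
 * what is actually in use.
 */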

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
        if (!rcu_cpu_stall_suppress)
                rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
        if (rcu_cpu_stall_suppress == 2)
                rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
        return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
        if (sysctl_panic_on_rcu_stall)
                panic("RCU Stall\n");
}
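
/*
 * Usage note: this sysctl is normally set via
 * /proc/sys/kernel/panic_on_rcu_stall, e.g.:
 *
 *      echo 1 > /proc/sys/kernel/panic_on_rcu_stall
 *
 * which turns the first stall warning into an immediate panic so
 * that the resulting crash dump captures the stalled state.
 */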

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
        WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}
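
/*
 * Why ULONG_MAX / 2: stall checks compare against ->jiffies_stall
 * using wraparound-safe macros such as ULONG_CMP_GE(a, b), which is
 * true only while (a) - (b) is at most ULONG_MAX / 2.  Setting the
 * deadline to jiffies + ULONG_MAX / 2 therefore keeps
 * ULONG_CMP_GE(jiffies, ->jiffies_stall) false for roughly half the
 * unsigned-long space, the farthest future such comparisons can
 * express.
 */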

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
        unsigned long j = jiffies;
        unsigned long j1;

        rcu_state.gp_start = j;
        j1 = rcu_jiffies_till_stall_check();
        /* Record ->gp_start before ->jiffies_stall. */
        smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
        rcu_state.jiffies_resched = j + j1 / 2;
        rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}
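
/*
 * The smp_store_release() above pairs with the smp_rmb() sequence in
 * check_cpu_stall(), which reads ->jiffies_stall and ->gp_start in
 * the opposite order so as to reject false-positive stall reports
 * across a grace-period boundary.
 */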

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
        rdp->ticks_this_gp = 0;
        rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
        WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
        unsigned long j;

        if (!rcu_kick_kthreads)
                return;
        j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
        if (time_after(jiffies, j) && rcu_state.gp_kthread &&
            (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
                WARN_ONCE(1, "Kicking %s grace-period kthread\n",
                          rcu_state.name);
                rcu_ftrace_dump(DUMP_ALL);
                wake_up_process(rcu_state.gp_kthread);
                WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
        }
}
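
/*
 * Note: rcu_kick_kthreads defaults to false and is typically enabled
 * with the rcupdate.rcu_kick_kthreads kernel boot parameter.  The
 * ->jiffies_kick_kthreads update above rate-limits the wakeups (and
 * the associated ftrace dumps) to roughly one per second.
 */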

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        rdp = container_of(iwp, struct rcu_data, rcu_iw);
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);
        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
                rdp->rcu_iw_gp_seq = rnp->gp_seq;
                rdp->rcu_iw_pending = false;
        }
        raw_spin_unlock_rcu_node(rnp);
}
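
/*
 * For reference, the posting side of this irq_work lives in tree.c
 * (rcu_implicit_dynticks_qs()); in outline, and slightly simplified:
 *
 *      init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
 *      rdp->rcu_iw_pending = true;
 *      rdp->rcu_iw_gp_seq = rnp->gp_seq;
 *      irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
 *
 * If the target CPU has interrupts enabled, the handler runs promptly,
 * records the grace-period number, and clears ->rcu_iw_pending.  A
 * pending flag still set at stall-warning time is therefore evidence
 * of a long interrupts-disabled stretch on that CPU.
 */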

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct task_struct *t;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!rcu_preempt_blocked_readers_cgp(rnp)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                /*
                 * We could be printing a lot while holding a spinlock.
                 * Avoid triggering hard lockup.
                 */
                touch_nmi_watchdog();
                sched_show_task(t);
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        struct task_struct *t;
        int ndetected = 0;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return 0;
        pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
               rnp->level, rnp->grplo, rnp->grphi);
        t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                pr_cont(" P%d", t->pid);
                ndetected++;
        }
        pr_cont("\n");
        return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
        int cpu;
        unsigned long flags;
        struct rcu_node *rnp;

        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                for_each_leaf_node_possible_cpu(rnp, cpu)
                        if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
                                if (!trigger_single_cpu_backtrace(cpu))
                                        dump_cpu_task(cpu);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

        sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
                rdp->last_accelerate & 0xffff, jiffies & 0xffff,
                !!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
        *cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

static const char * const gp_state_names[] = {
        [RCU_GP_IDLE] = "RCU_GP_IDLE",
        [RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
        [RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
        [RCU_GP_ONOFF] = "RCU_GP_ONOFF",
        [RCU_GP_INIT] = "RCU_GP_INIT",
        [RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
        [RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
        [RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
        [RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
        if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
                return "???";
        return gp_state_names[gs];
}
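
/*
 * The name/number pair is printed together, as in the "%s(%d)" of
 * show_rcu_gp_kthreads() below, so an out-of-range ->gp_state still
 * yields a usable "???(<n>)" in the output rather than an oops.
 */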

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
        unsigned long delta;
        char fast_no_hz[72];
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        char *ticks_title;
        unsigned long ticks_value;

        /*
         * We could be printing a lot while holding a spinlock.  Avoid
         * triggering hard lockup.
         */
        touch_nmi_watchdog();

        ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
        if (ticks_value) {
                ticks_title = "GPs behind";
        } else {
                ticks_title = "ticks this GP";
                ticks_value = rdp->ticks_this_gp;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
        delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
        pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
               cpu,
               "O."[!!cpu_online(cpu)],
               "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
               "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
               !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
                        rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
                                "!."[!delta],
               ticks_value, ticks_title,
               rcu_dynticks_snap(rdp) & 0xfff,
               rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
               READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
               fast_no_hz);
}
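
/*
 * Decoding the four flag characters after the CPU number, as derived
 * from the expressions above:
 *
 *      'O': the CPU is offline ('.' if online).
 *      'o': the CPU's bit is clear in ->qsmaskinit, so RCU was not
 *           expecting a quiescent state from it when the grace period
 *           started ('.' otherwise).
 *      'N': the CPU's bit is clear in ->qsmaskinitnext ('.' otherwise).
 *      Fourth character: '?' means CONFIG_IRQ_WORK=n; a digit gives
 *           the number of grace periods (capped at nine) that have
 *           elapsed with the stall-detection irq_work still pending;
 *           '!' means the irq_work last ran in a prior grace period;
 *           '.' means it ran during the current one.
 */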

/* Complain about starvation of grace-period kthread.  */
static void rcu_check_gp_kthread_starvation(void)
{
        struct task_struct *gpk = rcu_state.gp_kthread;
        unsigned long j;

        j = jiffies - READ_ONCE(rcu_state.gp_activity);
        if (j > 2 * HZ) {
                pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
                       rcu_state.name, j,
                       (long)rcu_seq_current(&rcu_state.gp_seq),
                       READ_ONCE(rcu_state.gp_flags),
                       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
                       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
                if (gpk) {
                        pr_err("RCU grace-period kthread stack dump:\n");
                        sched_show_task(gpk);
                        wake_up_process(gpk);
                }
        }
}

static void print_other_cpu_stall(unsigned long gp_seq)
{
        int cpu;
        unsigned long flags;
        unsigned long gpa;
        unsigned long j;
        int ndetected = 0;
        struct rcu_node *rnp;
        long totqlen = 0;

        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads();
        if (rcu_cpu_stall_suppress)
                return;

        /*
         * OK, time to rat on our buddy...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
        pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                ndetected += rcu_print_task_stall(rnp);
                if (rnp->qsmask != 0) {
                        for_each_leaf_node_possible_cpu(rnp, cpu)
                                if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
                                        print_cpu_stall_info(cpu);
                                        ndetected++;
                                }
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }

        for_each_possible_cpu(cpu)
                totqlen += rcu_get_n_cbs_cpu(cpu);
        pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
               smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
               (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
        if (ndetected) {
                rcu_dump_cpu_stacks();

                /* Complain about tasks blocking the grace period. */
                rcu_for_each_leaf_node(rnp)
                        rcu_print_detail_task_stall_rnp(rnp);
        } else {
                if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
                        gpa = READ_ONCE(rcu_state.gp_activity);
                        pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                               rcu_state.name, j - gpa, j, gpa,
                               READ_ONCE(jiffies_till_next_fqs),
                               rcu_get_root()->qsmask);
                        /* In this case, the current CPU might be at fault. */
                        sched_show_task(current);
                }
        }
        /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
                WRITE_ONCE(rcu_state.jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

        rcu_check_gp_kthread_starvation();

        panic_on_rcu_stall();

        rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(void)
{
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rcu_get_root();
        long totqlen = 0;

        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads();
        if (rcu_cpu_stall_suppress)
                return;

        /*
         * OK, time to rat on ourselves...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
        pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
        raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
        print_cpu_stall_info(smp_processor_id());
        raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
        for_each_possible_cpu(cpu)
                totqlen += rcu_get_n_cbs_cpu(cpu);
        pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
                jiffies - rcu_state.gp_start,
                (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

        rcu_check_gp_kthread_starvation();

        rcu_dump_cpu_stacks();

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
                WRITE_ONCE(rcu_state.jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        panic_on_rcu_stall();

        /*
         * Attempt to revive the RCU machinery by forcing a context switch.
         *
         * A context switch would normally allow the RCU state machine to make
         * progress and it could be we're stuck in kernel space without context
         * switches for an entirely unreasonable amount of time.
         */
        set_tsk_need_resched(current);
        set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
        unsigned long gs1;
        unsigned long gs2;
        unsigned long gps;
        unsigned long j;
        unsigned long jn;
        unsigned long js;
        struct rcu_node *rnp;

        if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
            !rcu_gp_in_progress())
                return;
        rcu_stall_kick_kthreads();
        j = jiffies;

        /*
         * Lots of memory barriers to reject false positives.
         *
         * The idea is to pick up rcu_state.gp_seq, then
         * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
         * another copy of rcu_state.gp_seq.  These values are updated in
         * the opposite order with memory barriers (or equivalent) during
         * grace-period initialization and cleanup.  Now, a false positive
         * can occur if we get a new value of rcu_state.gp_start and an old
         * value of rcu_state.jiffies_stall.  But given the memory barriers,
         * the only way that this can happen is if one grace period ends
         * and another starts between these two fetches.  This is detected
         * by comparing the second fetch of rcu_state.gp_seq with the
         * previous fetch from rcu_state.gp_seq.
         *
         * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
         * and rcu_state.gp_start suffice to forestall false positives.
         */
        gs1 = READ_ONCE(rcu_state.gp_seq);
        smp_rmb(); /* Pick up ->gp_seq first... */
        js = READ_ONCE(rcu_state.jiffies_stall);
        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
        gps = READ_ONCE(rcu_state.gp_start);
        smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
        gs2 = READ_ONCE(rcu_state.gp_seq);
        if (gs1 != gs2 ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
        jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
        if (rcu_gp_in_progress() &&
            (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
            cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

                /* We haven't checked in, so go dump stack. */
                print_cpu_stall();
                if (rcu_cpu_stall_ftrace_dump)
                        rcu_ftrace_dump(DUMP_ALL);

        } else if (rcu_gp_in_progress() &&
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
                   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

                /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(gs2);
                if (rcu_cpu_stall_ftrace_dump)
                        rcu_ftrace_dump(DUMP_ALL);
        }
}
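
/*
 * The cmpxchg() calls above act as a one-shot claim on the stall: of
 * all CPUs that read the same ->jiffies_stall value js, exactly one
 * succeeds in replacing it with jn, and only that CPU prints the
 * report.  The pattern in miniature (illustrative names):
 *
 *      if (cmpxchg(&deadline, seen, seen + delay) == seen)
 *              report();       // Sole winner reports; others retreat.
 *
 * Pushing ->jiffies_stall out to jn also rate-limits followup reports
 * for a stall that persists.
 */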

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.


/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
        int cpu;
        unsigned long j;
        unsigned long ja;
        unsigned long jr;
        unsigned long jw;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        j = jiffies;
        ja = j - READ_ONCE(rcu_state.gp_activity);
        jr = j - READ_ONCE(rcu_state.gp_req_activity);
        jw = j - READ_ONCE(rcu_state.gp_wake_time);
        pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
                rcu_state.name, gp_state_getname(rcu_state.gp_state),
                rcu_state.gp_state,
                rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
                ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
                (long)READ_ONCE(rcu_state.gp_seq),
                (long)READ_ONCE(rcu_get_root()->gp_seq_needed),
                READ_ONCE(rcu_state.gp_flags));
        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
                        continue;
                pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
                        rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
                        (long)rnp->gp_seq_needed);
                if (!rcu_is_leaf_node(rnp))
                        continue;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        rdp = per_cpu_ptr(&rcu_data, cpu);
                        if (rdp->gpwrap ||
                            ULONG_CMP_GE(rcu_state.gp_seq,
                                         rdp->gp_seq_needed))
                                continue;
                        pr_info("\tcpu %d ->gp_seq_needed %ld\n",
                                cpu, (long)rdp->gp_seq_needed);
                }
        }
        for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (rcu_segcblist_is_offloaded(&rdp->cblist))
                        show_rcu_nocb_state(rdp);
        }
        /* sched_show_task(rcu_state.gp_kthread); */
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
                                     const unsigned long gpssdelay)
{
        unsigned long flags;
        unsigned long j;
        struct rcu_node *rnp_root = rcu_get_root();
        static atomic_t warned = ATOMIC_INIT(0);

        if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
                return;
        j = jiffies; /* Expensive access, and in common case don't get here. */
        if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
            time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
            atomic_read(&warned))
                return;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        j = jiffies;
        if (rcu_gp_in_progress() ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
            time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
            time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
            atomic_read(&warned)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        /* Hold onto the leaf lock to make others see warned==1. */

        if (rnp_root != rnp)
                raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
        j = jiffies;
        if (rcu_gp_in_progress() ||
            ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
            time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
            time_before(j, rcu_state.gp_activity + gpssdelay) ||
            atomic_xchg(&warned, 1)) {
                if (rnp_root != rnp)
                        /* irqs remain disabled. */
                        raw_spin_unlock_rcu_node(rnp_root);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WARN_ON(1);
        if (rnp_root != rnp)
                raw_spin_unlock_rcu_node(rnp_root);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        show_rcu_gp_kthreads();
}
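
/*
 * Note the check/lock/recheck structure above: the first (lockless)
 * scan rejects the common case cheaply, the conditions are then
 * re-evaluated under the leaf rcu_node lock, and once more under the
 * root rcu_node lock before the WARN_ON(1) fires.  The atomic_xchg()
 * on 'warned' guarantees that at most one such splat is emitted even
 * if several CPUs race through the earlier checks.
 */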

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
        unsigned long cbs;
        int cpu;
        unsigned long max_cbs = 0;
        int max_cpu = -1;
        struct rcu_data *rdp;

        if (rcu_gp_in_progress()) {
                pr_info("%s: GP age %lu jiffies\n",
                        __func__, jiffies - rcu_state.gp_start);
                show_rcu_gp_kthreads();
        } else {
                pr_info("%s: Last GP end %lu jiffies ago\n",
                        __func__, jiffies - rcu_state.gp_end);
                preempt_disable();
                rdp = this_cpu_ptr(&rcu_data);
                rcu_check_gp_start_stall(rdp->mynode, rdp, j);
                preempt_enable();
        }
        for_each_possible_cpu(cpu) {
                cbs = rcu_get_n_cbs_cpu(cpu);
                if (!cbs)
                        continue;
                if (max_cpu < 0)
                        pr_info("%s: callbacks", __func__);
                pr_cont(" %d: %lu", cpu, cbs);
                if (cbs <= max_cbs)
                        continue;
                max_cbs = cbs;
                max_cpu = cpu;
        }
        if (max_cpu >= 0)
                pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
        show_rcu_gp_kthreads();
}

static struct sysrq_key_op sysrq_rcudump_op = {
        .handler = sysrq_show_rcu,
        .help_msg = "show-rcu(y)",
        .action_msg = "Show RCU tree",
        .enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
        if (sysrq_rcu)
                return register_sysrq_key('y', &sysrq_rcudump_op);
        return 0;
}
early_initcall(rcu_sysrq_init);
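
/*
 * Usage note: sysrq_rcu is read-only at runtime (mode 0444), so the
 * 'y' key must be claimed at boot, typically via the
 * rcutree.sysrq_rcu=1 kernel parameter (this file is built into
 * tree.c, which supplies the "rcutree." module-parameter prefix).
 * Once registered, the dump can be requested without a console, e.g.:
 *
 *      echo y > /proc/sysrq-trigger
 */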