/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


torture_param(int, cbflood_inter_holdoff, HZ,
	      "Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
	      "Holdoff between bursts (jiffies)");
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
	      "# callbacks per burst in flood");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	     "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	     "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	     "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	     "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	     "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	     "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	     "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
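
/*
 * For example, booting with "rcutorture.torture_type=srcu" (or loading
 * the module with torture_type=srcu) tortures SRCU rather than vanilla
 * RCU; the recognized names are the .name fields of the rcu_torture_ops
 * instances defined below.
 */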

static int nrealreaders;
static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;	/* RCU callback linkage. */
	int rtort_pipe_count;		/* Position in grace-period pipeline. */
	struct list_head rtort_free;	/* Linkage on freelist/removed list. */
	int rtort_mbtest;		/* Nonzero while element is live. */
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes;
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int irq_capable;
	int can_boost;
	const char *name;
};

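/* Ops vector for the flavor under test, selected via torture_type. */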
static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct torture_random_state *rrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		mdelay(longdelay_ms);
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500)))
		torture_preempt_schedule();  /* QS only if preemptible. */
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.ttype		= RCU_BH_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.get_gp_seq	= rcu_bh_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.call		= call_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_read_delay(struct torture_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task())
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

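/*
 * Randomly exercise both SRCU cleanup paths.  The quiesced variant
 * requires that all of this srcu_struct's callbacks have already been
 * invoked, which should hold by the time rcutorture's cleanup runs.
 */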
static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.ttype		= RCU_SCHED_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.get_gp_seq	= rcu_sched_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.get_state	= get_state_synchronize_sched,
	.cond_sync	= cond_synchronize_sched,
	.call		= call_rcu_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
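
/*
 * Why the ->gp_diff indirection?  For flavors using the gp_seq scheme,
 * grace-period sequence numbers carry state in their low-order bits
 * (see the rcu_seq_*() helpers in kernel/rcu/rcu.h), so a raw
 * subtraction would overstate the number of elapsed grace periods and
 * misbehave across counter wrap.  A minimal sketch of the counter-only
 * comparison, assuming the rcu.h encoding with RCU_SEQ_CTR_SHIFT
 * low-order state bits (illustrative only, not used by the test):
 */
static unsigned long __maybe_unused
rcutorture_seq_diff_sketch(unsigned long new, unsigned long old)
{
	/* Shift out the state bits; unsigned arithmetic handles wrap. */
	return (new - old) >> RCU_SEQ_CTR_SHIFT;
}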

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}
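
/*
 * Note that the smp_store_release() above pairs with the
 * smp_load_acquire() of ->inflight in rcu_torture_boost() below, so
 * the boost kthread cannot reuse the rcu_head for its next call_rcu()
 * invocation until the callback's RCU-core accesses have completed.
 */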

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
{
}

/*
 * RCU torture callback-flood kthread.  Repeatedly induces bursts of calls
 * to call_rcu() or analogous, increasing the probability of occurrence
 * of callback-overflow corner cases.
 */
static int
rcu_torture_cbflood(void *arg)
{
	int err = 1;
	int i;
	int j;
	struct rcu_head *rhp;

	if (cbflood_n_per_burst > 0 &&
	    cbflood_inter_holdoff > 0 &&
	    cbflood_intra_holdoff > 0 &&
	    cur_ops->call &&
	    cur_ops->cb_barrier) {
		rhp = vmalloc(array3_size(cbflood_n_burst,
					  cbflood_n_per_burst,
					  sizeof(*rhp)));
		err = !rhp;
	}
	if (err) {
		VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
		goto wait_for_stop;
	}
	VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
	do {
		schedule_timeout_interruptible(cbflood_inter_holdoff);
		atomic_long_inc(&n_cbfloods);
		WARN_ON(signal_pending(current));
		for (i = 0; i < cbflood_n_burst; i++) {
			for (j = 0; j < cbflood_n_per_burst; j++) {
				cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
					      rcu_torture_cbflood_cb);
			}
			schedule_timeout_interruptible(cbflood_intra_holdoff);
			WARN_ON(signal_pending(current));
		}
		cur_ops->cb_barrier();
		stutter_wait("rcu_torture_cbflood");
	} while (!torture_must_stop());
	vfree(rhp);
wait_for_stop:
	torture_kthread_stopping("rcu_torture_cbflood");
	return 0;
}
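
/*
 * With the module-parameter defaults above (cbflood_n_burst = 3,
 * cbflood_n_per_burst = 20000, cbflood_intra_holdoff = 1), each flood
 * posts 60,000 callbacks in three closely spaced bursts, once per
 * cbflood_inter_holdoff (default HZ) jiffies.
 */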

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		stutter_wait("rcu_torture_writer");
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	int idx;
	unsigned long started;
	unsigned long completed;
	static DEFINE_TORTURE_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;
	unsigned long long ts;

	idx = cur_ops->readlock();
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
					  started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise the
 * RCU implementation is broken.
 */
1165 static int
1166 rcu_torture_reader(void *arg)
1167 {
1168         unsigned long started;
1169         unsigned long completed;
1170         int idx;
1171         DEFINE_TORTURE_RANDOM(rand);
1172         struct rcu_torture *p;
1173         int pipe_count;
1174         struct timer_list t;
1175         unsigned long long ts;
1176
1177         VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1178         set_user_nice(current, MAX_NICE);
1179         if (irqreader && cur_ops->irq_capable)
1180                 timer_setup_on_stack(&t, rcu_torture_timer, 0);
1181
1182         do {
1183                 if (irqreader && cur_ops->irq_capable) {
1184                         if (!timer_pending(&t))
1185                                 mod_timer(&t, jiffies + 1);
1186                 }
1187                 idx = cur_ops->readlock();
1188                 started = cur_ops->get_gp_seq();
1189                 ts = rcu_trace_clock_local();
1190                 p = rcu_dereference_check(rcu_torture_current,
1191                                           rcu_read_lock_bh_held() ||
1192                                           rcu_read_lock_sched_held() ||
1193                                           srcu_read_lock_held(srcu_ctlp) ||
1194                                           torturing_tasks());
1195                 if (p == NULL) {
1196                         /* Wait for rcu_torture_writer to get underway */
1197                         cur_ops->readunlock(idx);
1198                         schedule_timeout_interruptible(HZ);
1199                         continue;
1200                 }
1201                 if (p->rtort_mbtest == 0)
1202                         atomic_inc(&n_rcu_torture_mberror);
1203                 cur_ops->read_delay(&rand);
1204                 preempt_disable();
1205                 pipe_count = p->rtort_pipe_count;
1206                 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1207                         /* Should not happen, but... */
1208                         pipe_count = RCU_TORTURE_PIPE_LEN;
1209                 }
1210                 completed = cur_ops->get_gp_seq();
1211                 if (pipe_count > 1) {
1212                         do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1213                                                   ts, started, completed);
1214                         rcu_ftrace_dump(DUMP_ALL);
1215                 }
1216                 __this_cpu_inc(rcu_torture_count[pipe_count]);
1217                 completed = rcutorture_seq_diff(completed, started);
1218                 if (completed > RCU_TORTURE_PIPE_LEN) {
1219                         /* Should not happen, but... */
1220                         completed = RCU_TORTURE_PIPE_LEN;
1221                 }
1222                 __this_cpu_inc(rcu_torture_batch[completed]);
1223                 preempt_enable();
1224                 cur_ops->readunlock(idx);
1225                 stutter_wait("rcu_torture_reader");
1226         } while (!torture_must_stop());
1227         if (irqreader && cur_ops->irq_capable) {
1228                 del_timer_sync(&t);
1229                 destroy_timer_on_stack(&t);
1230         }
1231         torture_kthread_stopping("rcu_torture_reader");
1232         return 0;
1233 }
1234
1235 /*
1236  * Print torture statistics.  Caller must ensure that there is only
1237  * one call to this function at a given time!!!  This is normally
1238  * accomplished by relying on the module system to only have one copy
1239  * of the module loaded, and then by giving the rcu_torture_stats
1240  * kthread full control (or the init/cleanup functions when rcu_torture_stats
1241  * thread is not running).
1242  */
1243 static void
1244 rcu_torture_stats_print(void)
1245 {
1246         int cpu;
1247         int i;
1248         long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1249         long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1250         static unsigned long rtcv_snap = ULONG_MAX;
1251         static bool splatted;
1252         struct task_struct *wtp;
1253
1254         for_each_possible_cpu(cpu) {
1255                 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1256                         pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
1257                         batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
1258                 }
1259         }
1260         for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1261                 if (pipesummary[i] != 0)
1262                         break;
1263         }
1264
1265         pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1266         pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1267                 rcu_torture_current,
1268                 rcu_torture_current_version,
1269                 list_empty(&rcu_torture_freelist),
1270                 atomic_read(&n_rcu_torture_alloc),
1271                 atomic_read(&n_rcu_torture_alloc_fail),
1272                 atomic_read(&n_rcu_torture_free));
1273         pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
1274                 atomic_read(&n_rcu_torture_mberror),
1275                 n_rcu_torture_barrier_error,
1276                 n_rcu_torture_boost_ktrerror,
1277                 n_rcu_torture_boost_rterror);
1278         pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1279                 n_rcu_torture_boost_failure,
1280                 n_rcu_torture_boosts,
1281                 n_rcu_torture_timers);
1282         torture_onoff_stats();
1283         pr_cont("barrier: %ld/%ld:%ld ",
1284                 n_barrier_successes,
1285                 n_barrier_attempts,
1286                 n_rcu_torture_barrier_error);
1287         pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
1288
1289         pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1290         if (atomic_read(&n_rcu_torture_mberror) != 0 ||
1291             n_rcu_torture_barrier_error != 0 ||
1292             n_rcu_torture_boost_ktrerror != 0 ||
1293             n_rcu_torture_boost_rterror != 0 ||
1294             n_rcu_torture_boost_failure != 0 ||
1295             i > 1) {
1296                 pr_cont("%s", "!!! ");
1297                 atomic_inc(&n_rcu_torture_error);
1298                 WARN_ON_ONCE(1);
1299         }
1300         pr_cont("Reader Pipe: ");
1301         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1302                 pr_cont(" %ld", pipesummary[i]);
1303         pr_cont("\n");
1304
1305         pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1306         pr_cont("Reader Batch: ");
1307         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1308                 pr_cont(" %ld", batchsummary[i]);
1309         pr_cont("\n");
1310
1311         pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1312         pr_cont("Free-Block Circulation: ");
1313         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1314                 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
1315         }
1316         pr_cont("\n");
1317
1318         if (cur_ops->stats)
1319                 cur_ops->stats();
1320         if (rtcv_snap == rcu_torture_current_version &&
1321             rcu_torture_current != NULL) {
1322                 int __maybe_unused flags = 0;
1323                 unsigned long __maybe_unused gp_seq = 0;
1324
1325                 rcutorture_get_gp_data(cur_ops->ttype,
1326                                        &flags, &gp_seq);
1327                 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
1328                                         &flags, &gp_seq);
1329                 wtp = READ_ONCE(writer_task);
1330                 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
1331                          rcu_torture_writer_state_getname(),
1332                          rcu_torture_writer_state, gp_seq, flags,
1333                          wtp == NULL ? ~0UL : wtp->state,
1334                          wtp == NULL ? -1 : (int)task_cpu(wtp));
1335                 if (!splatted && wtp) {
1336                         sched_show_task(wtp);
1337                         splatted = true;
1338                 }
1339                 show_rcu_gp_kthreads();
1340                 rcu_ftrace_dump(DUMP_ALL);
1341         }
1342         rtcv_snap = rcu_torture_current_version;
1343 }
1344
1345 /*
1346  * Periodically prints torture statistics, if periodic statistics printing
1347  * was specified via the stat_interval module parameter.
1348  */
1349 static int
1350 rcu_torture_stats(void *arg)
1351 {
1352         VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
1353         do {
1354                 schedule_timeout_interruptible(stat_interval * HZ);
1355                 rcu_torture_stats_print();
1356                 torture_shutdown_absorb("rcu_torture_stats");
1357         } while (!torture_must_stop());
1358         torture_kthread_stopping("rcu_torture_stats");
1359         return 0;
1360 }
1361
1362 static inline void
1363 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
1364 {
1365         pr_alert("%s" TORTURE_FLAG
1366                  "--- %s: nreaders=%d nfakewriters=%d "
1367                  "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1368                  "shuffle_interval=%d stutter=%d irqreader=%d "
1369                  "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1370                  "test_boost=%d/%d test_boost_interval=%d "
1371                  "test_boost_duration=%d shutdown_secs=%d "
1372                  "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
1373                  "n_barrier_cbs=%d "
1374                  "onoff_interval=%d onoff_holdoff=%d\n",
1375                  torture_type, tag, nrealreaders, nfakewriters,
1376                  stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1377                  stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1378                  test_boost, cur_ops->can_boost,
1379                  test_boost_interval, test_boost_duration, shutdown_secs,
1380                  stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
1381                  n_barrier_cbs,
1382                  onoff_interval, onoff_holdoff);
1383 }
1384
1385 static int rcutorture_booster_cleanup(unsigned int cpu)
1386 {
1387         struct task_struct *t;
1388
1389         if (boost_tasks[cpu] == NULL)
1390                 return 0;
1391         mutex_lock(&boost_mutex);
1392         t = boost_tasks[cpu];
1393         boost_tasks[cpu] = NULL;
1394         mutex_unlock(&boost_mutex);
1395
1396         /* This must be outside of the mutex, otherwise deadlock! */
1397         torture_stop_kthread(rcu_torture_boost, t);
1398         return 0;
1399 }
1400
1401 static int rcutorture_booster_init(unsigned int cpu)
1402 {
1403         int retval;
1404
1405         if (boost_tasks[cpu] != NULL)
1406                 return 0;  /* Already created, nothing more to do. */
1407
1408         /* Don't allow boost_starttime recalculation during task creation. */
1409         mutex_lock(&boost_mutex);
1410         VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1411         boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1412                                                   cpu_to_node(cpu),
1413                                                   "rcu_torture_boost");
1414         if (IS_ERR(boost_tasks[cpu])) {
1415                 retval = PTR_ERR(boost_tasks[cpu]);
1416                 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
1417                 n_rcu_torture_boost_ktrerror++;
1418                 boost_tasks[cpu] = NULL;
1419                 mutex_unlock(&boost_mutex);
1420                 return retval;
1421         }
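        /* Bind before the first wakeup so the booster only ever runs
         * on its assigned CPU. */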
1422         kthread_bind(boost_tasks[cpu], cpu);
1423         wake_up_process(boost_tasks[cpu]);
1424         mutex_unlock(&boost_mutex);
1425         return 0;
1426 }
1427
1428 /*
1429  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
1430  * induces a CPU stall for the time specified by stall_cpu.
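 * For example (values are illustrative), "modprobe rcutorture
 * stall_cpu=30 stall_cpu_holdoff=10 stall_cpu_irqsoff=1" stalls for
 * 30 seconds with interrupts disabled after a 10-second holdoff.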
1431  */
1432 static int rcu_torture_stall(void *args)
1433 {
1434         unsigned long stop_at;
1435
1436         VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
1437         if (stall_cpu_holdoff > 0) {
1438                 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
1439                 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
1440                 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
1441         }
1442         if (!kthread_should_stop()) {
1443                 stop_at = get_seconds() + stall_cpu;
1444                 /* RCU CPU stall is expected behavior in the following code. */
1445                 rcu_read_lock();
1446                 if (stall_cpu_irqsoff)
1447                         local_irq_disable();
1448                 else
1449                         preempt_disable();
1450                 pr_alert("rcu_torture_stall start on CPU %d.\n",
1451                          smp_processor_id());
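                /* ULONG_CMP_LT() compares modulo 2^BITS_PER_LONG, so the
                 * loop terminates correctly even if get_seconds() wraps. */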
1452                 while (ULONG_CMP_LT(get_seconds(), stop_at))
1453                         continue;  /* Induce RCU CPU stall warning. */
1454                 if (stall_cpu_irqsoff)
1455                         local_irq_enable();
1456                 else
1457                         preempt_enable();
1458                 rcu_read_unlock();
1459                 pr_alert("rcu_torture_stall end.\n");
1460         }
1461         torture_shutdown_absorb("rcu_torture_stall");
1462         while (!kthread_should_stop())
1463                 schedule_timeout_interruptible(10 * HZ);
1464         return 0;
1465 }
1466
1467 /* Spawn CPU-stall kthread, if stall_cpu specified. */
1468 static int __init rcu_torture_stall_init(void)
1469 {
1470         if (stall_cpu <= 0)
1471                 return 0;
1472         return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
1473 }
1474
1475 /* Callback function for RCU barrier testing. */
1476 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
1477 {
1478         atomic_inc(&barrier_cbs_invoked);
1479 }
1480
1481 /* kthread function to register callbacks used to test RCU barriers. */
1482 static int rcu_torture_barrier_cbs(void *arg)
1483 {
1484         long myid = (long)arg;
1485         bool lastphase = false;
1486         bool newphase;
1487         struct rcu_head rcu;
1488
1489         init_rcu_head_on_stack(&rcu);
1490         VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
1491         set_user_nice(current, MAX_NICE);
1492         do {
1493                 wait_event(barrier_cbs_wq[myid],
1494                            (newphase =
1495                             smp_load_acquire(&barrier_phase)) != lastphase ||
1496                            torture_must_stop());
1497                 lastphase = newphase;
1498                 if (torture_must_stop())
1499                         break;
1500                 /*
1501                  * The above smp_load_acquire() ensures barrier_phase load
1502                  * is ordered before the following ->call().
1503                  */
1504                 local_irq_disable(); /* Just to test no-irq call_rcu(). */
1505                 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
1506                 local_irq_enable();
1507                 if (atomic_dec_and_test(&barrier_cbs_count))
1508                         wake_up(&barrier_wq);
1509         } while (!torture_must_stop());
1510         if (cur_ops->cb_barrier != NULL)
1511                 cur_ops->cb_barrier();
1512         destroy_rcu_head_on_stack(&rcu);
1513         torture_kthread_stopping("rcu_torture_barrier_cbs");
1514         return 0;
1515 }
1516
1517 /* kthread function to drive and coordinate RCU barrier testing. */
1518 static int rcu_torture_barrier(void *arg)
1519 {
1520         int i;
1521
1522         VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
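        /* Each pass flips barrier_phase to release the callback kthreads,
         * waits for each to post one callback, then uses cb_barrier() to
         * wait for those callbacks and checks that all were invoked. */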
1523         do {
1524                 atomic_set(&barrier_cbs_invoked, 0);
1525                 atomic_set(&barrier_cbs_count, n_barrier_cbs);
1526                 /* Ensure barrier_phase ordered after prior assignments. */
1527                 smp_store_release(&barrier_phase, !barrier_phase);
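                /* This release pairs with the smp_load_acquire() in
                 * rcu_torture_barrier_cbs(). */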
1528                 for (i = 0; i < n_barrier_cbs; i++)
1529                         wake_up(&barrier_cbs_wq[i]);
1530                 wait_event(barrier_wq,
1531                            atomic_read(&barrier_cbs_count) == 0 ||
1532                            torture_must_stop());
1533                 if (torture_must_stop())
1534                         break;
1535                 n_barrier_attempts++;
1536                 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
1537                 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
1538                         n_rcu_torture_barrier_error++;
1539                         pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
1540                                atomic_read(&barrier_cbs_invoked),
1541                                n_barrier_cbs);
1542                         WARN_ON_ONCE(1);
1543                 } else
1544                         n_barrier_successes++;
1545                 schedule_timeout_interruptible(HZ / 10);
1546         } while (!torture_must_stop());
1547         torture_kthread_stopping("rcu_torture_barrier");
1548         return 0;
1549 }
1550
1551 /* Initialize RCU barrier testing. */
1552 static int rcu_torture_barrier_init(void)
1553 {
1554         int i;
1555         int ret;
1556
1557         if (n_barrier_cbs <= 0)
1558                 return 0;
1559         if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
1560                 pr_alert("%s" TORTURE_FLAG
1561                          " Call or barrier ops missing for %s,\n",
1562                          torture_type, cur_ops->name);
1563                 pr_alert("%s" TORTURE_FLAG
1564                          " RCU barrier testing omitted from run.\n",
1565                          torture_type);
1566                 return 0;
1567         }
1568         atomic_set(&barrier_cbs_count, 0);
1569         atomic_set(&barrier_cbs_invoked, 0);
1570         barrier_cbs_tasks =
1571                 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
1572                         GFP_KERNEL);
1573         barrier_cbs_wq =
1574                 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
1575         if (barrier_cbs_tasks == NULL || barrier_cbs_wq == NULL)
1576                 return -ENOMEM;
1577         for (i = 0; i < n_barrier_cbs; i++) {
1578                 init_waitqueue_head(&barrier_cbs_wq[i]);
1579                 ret = torture_create_kthread(rcu_torture_barrier_cbs,
1580                                              (void *)(long)i,
1581                                              barrier_cbs_tasks[i]);
1582                 if (ret)
1583                         return ret;
1584         }
1585         return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
1586 }
1587
1588 /* Clean up after RCU barrier testing. */
1589 static void rcu_torture_barrier_cleanup(void)
1590 {
1591         int i;
1592
1593         torture_stop_kthread(rcu_torture_barrier, barrier_task);
1594         if (barrier_cbs_tasks != NULL) {
1595                 for (i = 0; i < n_barrier_cbs; i++)
1596                         torture_stop_kthread(rcu_torture_barrier_cbs,
1597                                              barrier_cbs_tasks[i]);
1598                 kfree(barrier_cbs_tasks);
1599                 barrier_cbs_tasks = NULL;
1600         }
1601         if (barrier_cbs_wq != NULL) {
1602                 kfree(barrier_cbs_wq);
1603                 barrier_cbs_wq = NULL;
1604         }
1605 }
1606
1607 static enum cpuhp_state rcutor_hp;
1608
1609 static void
1610 rcu_torture_cleanup(void)
1611 {
1612         int flags = 0;
1613         unsigned long gp_seq = 0;
1614         int i;
1615
1616         rcutorture_record_test_transition();
1617         if (torture_cleanup_begin()) {
1618                 if (cur_ops->cb_barrier != NULL)
1619                         cur_ops->cb_barrier();
1620                 return;
1621         }
1622
1623         rcu_torture_barrier_cleanup();
1624         torture_stop_kthread(rcu_torture_stall, stall_task);
1625         torture_stop_kthread(rcu_torture_writer, writer_task);
1626
1627         if (reader_tasks) {
1628                 for (i = 0; i < nrealreaders; i++)
1629                         torture_stop_kthread(rcu_torture_reader,
1630                                              reader_tasks[i]);
1631                 kfree(reader_tasks);
1632         }
1633         rcu_torture_current = NULL;
1634
1635         if (fakewriter_tasks) {
1636                 for (i = 0; i < nfakewriters; i++) {
1637                         torture_stop_kthread(rcu_torture_fakewriter,
1638                                              fakewriter_tasks[i]);
1639                 }
1640                 kfree(fakewriter_tasks);
1641                 fakewriter_tasks = NULL;
1642         }
1643
1644         rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
1645         srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
1646         pr_alert("%s:  End-test grace-period state: g%lu f%#x\n",
1647                  cur_ops->name, gp_seq, flags);
1648         torture_stop_kthread(rcu_torture_stats, stats_task);
1649         torture_stop_kthread(rcu_torture_fqs, fqs_task);
1650         for (i = 0; i < ncbflooders; i++)
1651                 torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
1652         if ((test_boost == 1 && cur_ops->can_boost) ||
1653             test_boost == 2)
1654                 cpuhp_remove_state(rcutor_hp);
1655
1656         /*
1657          * Wait for all RCU callbacks to fire, then do flavor-specific
1658          * cleanup operations.
1659          */
1660         if (cur_ops->cb_barrier != NULL)
1661                 cur_ops->cb_barrier();
1662         if (cur_ops->cleanup != NULL)
1663                 cur_ops->cleanup();
1664
1665         rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
1666
1667         if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
1668                 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
1669         else if (torture_onoff_failures())
1670                 rcu_torture_print_module_parms(cur_ops,
1671                                                "End of test: RCU_HOTPLUG");
1672         else
1673                 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
1674         torture_cleanup_end();
1675 }
1676
1677 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
1678 static void rcu_torture_leak_cb(struct rcu_head *rhp)
1679 {
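        /* Intentionally empty: only this callback's queuing matters here. */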
1680 }
1681
1682 static void rcu_torture_err_cb(struct rcu_head *rhp)
1683 {
1684         /*
1685          * This -might- happen due to race conditions, but is unlikely.
1686          * The scenario that leads to this happening is that the
1687          * first of the pair of duplicate callbacks is queued,
1688          * someone else starts a grace period that includes that
1689          * callback, then the second of the pair must wait for the
1690          * next grace period.  Unlikely, but can happen.  If it
1691          * does happen, the debug-objects subsystem won't have splatted.
1692          */
1693         pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
1694 }
1695 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1696
1697 /*
1698  * Verify that double-free causes debug-objects to complain, but only
1699  * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
1700  * cannot be carried out.
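 * This test runs only when the object_debug module parameter is set,
 * for example via "modprobe rcutorture object_debug=1".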
1701  */
1702 static void rcu_test_debug_objects(void)
1703 {
1704 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
1705         struct rcu_head rh1;
1706         struct rcu_head rh2;
1707
1708         init_rcu_head_on_stack(&rh1);
1709         init_rcu_head_on_stack(&rh2);
1710         pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
1711
1712         /* Try to queue the rh2 pair of callbacks for the same grace period. */
1713         preempt_disable(); /* Prevent preemption from interrupting test. */
1714         rcu_read_lock(); /* Make it impossible to finish a grace period. */
1715         call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
1716         local_irq_disable(); /* Make it harder to start a new grace period. */
1717         call_rcu(&rh2, rcu_torture_leak_cb);
1718         call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
1719         local_irq_enable();
1720         rcu_read_unlock();
1721         preempt_enable();
1722
1723         /* Wait for them all to get done so we can safely return. */
1724         rcu_barrier();
1725         pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
1726         destroy_rcu_head_on_stack(&rh1);
1727         destroy_rcu_head_on_stack(&rh2);
1728 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1729         pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
1730 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1731 }
1732
1733 static int __init
1734 rcu_torture_init(void)
1735 {
1736         int i;
1737         int cpu;
1738         int firsterr = 0;
1739         static struct rcu_torture_ops *torture_ops[] = {
1740                 &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
1741                 &sched_ops, &tasks_ops,
1742         };
1743
1744         if (!torture_init_begin(torture_type, verbose))
1745                 return -EBUSY;
1746
1747         /* Process args and tell the world that the torturer is on the job. */
1748         for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
1749                 cur_ops = torture_ops[i];
1750                 if (strcmp(torture_type, cur_ops->name) == 0)
1751                         break;
1752         }
1753         if (i == ARRAY_SIZE(torture_ops)) {
1754                 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
1755                          torture_type);
1756                 pr_alert("rcu-torture types:");
1757                 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
1758                         pr_alert(" %s", torture_ops[i]->name);
1759                 pr_alert("\n");
1760                 firsterr = -EINVAL;
1761                 goto unwind;
1762         }
1763         if (cur_ops->fqs == NULL && fqs_duration != 0) {
1764                 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
1765                 fqs_duration = 0;
1766         }
1767         if (cur_ops->init)
1768                 cur_ops->init();
1769
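        /* Negative nreaders (the default is -1) sizes the reader pool from
         * the CPU count: num_online_cpus() - 2 - nreaders, so the default
         * yields num_online_cpus() - 1 readers. */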
1770         if (nreaders >= 0) {
1771                 nrealreaders = nreaders;
1772         } else {
1773                 nrealreaders = num_online_cpus() - 2 - nreaders;
1774                 if (nrealreaders <= 0)
1775                         nrealreaders = 1;
1776         }
1777         rcu_torture_print_module_parms(cur_ops, "Start of test");
1778
1779         /* Set up the freelist. */
1780
1781         INIT_LIST_HEAD(&rcu_torture_freelist);
1782         for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
1783                 rcu_tortures[i].rtort_mbtest = 0;
1784                 list_add_tail(&rcu_tortures[i].rtort_free,
1785                               &rcu_torture_freelist);
1786         }
1787
1788         /* Initialize the statistics so that each run gets its own numbers. */
1789
1790         rcu_torture_current = NULL;
1791         rcu_torture_current_version = 0;
1792         atomic_set(&n_rcu_torture_alloc, 0);
1793         atomic_set(&n_rcu_torture_alloc_fail, 0);
1794         atomic_set(&n_rcu_torture_free, 0);
1795         atomic_set(&n_rcu_torture_mberror, 0);
1796         atomic_set(&n_rcu_torture_error, 0);
1797         n_rcu_torture_barrier_error = 0;
1798         n_rcu_torture_boost_ktrerror = 0;
1799         n_rcu_torture_boost_rterror = 0;
1800         n_rcu_torture_boost_failure = 0;
1801         n_rcu_torture_boosts = 0;
1802         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1803                 atomic_set(&rcu_torture_wcount[i], 0);
1804         for_each_possible_cpu(cpu) {
1805                 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1806                         per_cpu(rcu_torture_count, cpu)[i] = 0;
1807                         per_cpu(rcu_torture_batch, cpu)[i] = 0;
1808                 }
1809         }
1810
1811         /* Start up the kthreads. */
1812
1813         firsterr = torture_create_kthread(rcu_torture_writer, NULL,
1814                                           writer_task);
1815         if (firsterr)
1816                 goto unwind;
1817         if (nfakewriters > 0) {
1818                 fakewriter_tasks = kcalloc(nfakewriters,
1819                                            sizeof(fakewriter_tasks[0]),
1820                                            GFP_KERNEL);
1821                 if (fakewriter_tasks == NULL) {
1822                         VERBOSE_TOROUT_ERRSTRING("out of memory");
1823                         firsterr = -ENOMEM;
1824                         goto unwind;
1825                 }
1826         }
1827         for (i = 0; i < nfakewriters; i++) {
1828                 firsterr = torture_create_kthread(rcu_torture_fakewriter,
1829                                                   NULL, fakewriter_tasks[i]);
1830                 if (firsterr)
1831                         goto unwind;
1832         }
1833         reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
1834                                GFP_KERNEL);
1835         if (reader_tasks == NULL) {
1836                 VERBOSE_TOROUT_ERRSTRING("out of memory");
1837                 firsterr = -ENOMEM;
1838                 goto unwind;
1839         }
1840         for (i = 0; i < nrealreaders; i++) {
1841                 firsterr = torture_create_kthread(rcu_torture_reader, NULL,
1842                                                   reader_tasks[i]);
1843                 if (firsterr)
1844                         goto unwind;
1845         }
1846         if (stat_interval > 0) {
1847                 firsterr = torture_create_kthread(rcu_torture_stats, NULL,
1848                                                   stats_task);
1849                 if (firsterr)
1850                         goto unwind;
1851         }
1852         if (test_no_idle_hz && shuffle_interval > 0) {
1853                 firsterr = torture_shuffle_init(shuffle_interval * HZ);
1854                 if (firsterr)
1855                         goto unwind;
1856         }
1857         if (stutter < 0)
1858                 stutter = 0;
1859         if (stutter) {
1860                 firsterr = torture_stutter_init(stutter * HZ);
1861                 if (firsterr)
1862                         goto unwind;
1863         }
1864         if (fqs_duration < 0)
1865                 fqs_duration = 0;
1866         if (fqs_duration) {
1867                 /* Create the fqs thread */
1868                 firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
1869                                                   fqs_task);
1870                 if (firsterr)
1871                         goto unwind;
1872         }
1873         if (test_boost_interval < 1)
1874                 test_boost_interval = 1;
1875         if (test_boost_duration < 2)
1876                 test_boost_duration = 2;
1877         if ((test_boost == 1 && cur_ops->can_boost) ||
1878             test_boost == 2) {
1879
1880                 boost_starttime = jiffies + test_boost_interval * HZ;
1881
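                /* A nonnegative return from cpuhp_setup_state() with
                 * CPUHP_AP_ONLINE_DYN is the dynamically allocated hotplug
                 * state, saved here so cleanup can remove it. */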
1882                 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
1883                                              rcutorture_booster_init,
1884                                              rcutorture_booster_cleanup);
1885                 if (firsterr < 0)
1886                         goto unwind;
1887                 rcutor_hp = firsterr;
1888         }
1889         firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
1890         if (firsterr)
1891                 goto unwind;
1892         firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
1893         if (firsterr)
1894                 goto unwind;
1895         firsterr = rcu_torture_stall_init();
1896         if (firsterr)
1897                 goto unwind;
1898         firsterr = rcu_torture_barrier_init();
1899         if (firsterr)
1900                 goto unwind;
1901         if (object_debug)
1902                 rcu_test_debug_objects();
1903         if (cbflood_n_burst > 0) {
1904                 /* Create the cbflood threads */
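                /* One flooder kthread per four online CPUs, rounded up. */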
1905                 ncbflooders = (num_online_cpus() + 3) / 4;
1906                 cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
1907                                        GFP_KERNEL);
1908                 if (!cbflood_task) {
1909                         VERBOSE_TOROUT_ERRSTRING("out of memory");
1910                         firsterr = -ENOMEM;
1911                         goto unwind;
1912                 }
1913                 for (i = 0; i < ncbflooders; i++) {
1914                         firsterr = torture_create_kthread(rcu_torture_cbflood,
1915                                                           NULL,
1916                                                           cbflood_task[i]);
1917                         if (firsterr)
1918                                 goto unwind;
1919                 }
1920         }
1921         rcutorture_record_test_transition();
1922         torture_init_end();
1923         return 0;
1924
1925 unwind:
1926         torture_init_end();
1927         rcu_torture_cleanup();
1928         return firsterr;
1929 }
1930
1931 module_init(rcu_torture_init);
1932 module_exit(rcu_torture_cleanup);
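
/*
 * Sketch of a typical run (parameter values are illustrative only):
 *
 *	modprobe rcutorture torture_type=srcu stat_interval=30
 *	... let the test run ...
 *	rmmod rcutorture	# cleanup prints the SUCCESS/FAILURE verdict
 */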