// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based performance-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
        pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
        do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
        do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.   Specify only the nr_cpus kernel boot parameter.  This will
 *      set both nreaders and nwriters to the value specified by
 *      nr_cpus for a mixed reader/writer test.
 *
 * 2.   Specify the nr_cpus kernel boot parameter, but set
 *      rcuperf.nreaders to zero.  This will set nwriters to the
 *      value specified by nr_cpus for an update-only test.
 *
 * 3.   Specify the nr_cpus kernel boot parameter, but set
 *      rcuperf.nwriters to zero.  This will set nreaders to the
 *      value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 */
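
/*
 * Illustrative examples only (the values are hypothetical, not taken from
 * this file): an update-only run as in case 2 above might boot with
 * "nr_cpus=8 rcuperf.nreaders=0 rcuperf.perf_type=rcu", while a modular
 * build might be started with "modprobe rcuperf nwriters=4 gp_exp=1".
 * The torture_param() definitions below are the authoritative list of
 * parameters.
 */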

#ifdef MODULE
# define RCUPERF_SHUTDOWN 0
#else
# define RCUPERF_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
              "Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100
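
/*
 * Summary of the logic in rcu_perf_writer() below: each writer kthread
 * stores one grace-period latency per pass through its measurement loop,
 * considers its own job done once it has taken MIN_MEAS measurements,
 * and never stores more than MAX_MEAS of them.
 */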

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_perf_ops {
        int ptype;
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readunlock)(int idx);
        unsigned long (*get_gp_seq)(void);
        unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        unsigned long (*exp_completed)(void);
        void (*async)(struct rcu_head *head, rcu_callback_t func);
        void (*gp_barrier)(void);
        void (*sync)(void);
        void (*exp_sync)(void);
        const char *name;
};
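
/*
 * Not every hook need be provided: ->cleanup and ->gp_diff are optional
 * and are checked for NULL before use (see rcu_perf_cleanup() and
 * rcuperf_seq_diff() below), and a flavor without an expedited primitive
 * (such as tasks_ops below) reuses its normal synchronize operation
 * for ->exp_sync.
 */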

static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
        return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
        .ptype          = RCU_FLAVOR,
        .init           = rcu_sync_perf_init,
        .readlock       = rcu_perf_read_lock,
        .readunlock     = rcu_perf_read_unlock,
        .get_gp_seq     = rcu_get_gp_seq,
        .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed,
        .async          = call_rcu,
        .gp_barrier     = rcu_barrier,
        .sync           = synchronize_rcu,
        .exp_sync       = synchronize_rcu_expedited,
        .name           = "rcu"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
        return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
        srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
        return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
        call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
        srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
        synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
        synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
        .ptype          = SRCU_FLAVOR,
        .init           = rcu_sync_perf_init,
        .readlock       = srcu_perf_read_lock,
        .readunlock     = srcu_perf_read_unlock,
        .get_gp_seq     = srcu_perf_completed,
        .gp_diff        = rcu_seq_diff,
        .exp_completed  = srcu_perf_completed,
        .async          = srcu_call_rcu,
        .gp_barrier     = srcu_rcu_barrier,
        .sync           = srcu_perf_synchronize,
        .exp_sync       = srcu_perf_synchronize_expedited,
        .name           = "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
        srcu_ctlp = &srcud;
        init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
        cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
        .ptype          = SRCU_FLAVOR,
        .init           = srcu_sync_perf_init,
        .cleanup        = srcu_sync_perf_cleanup,
        .readlock       = srcu_perf_read_lock,
        .readunlock     = srcu_perf_read_unlock,
        .get_gp_seq     = srcu_perf_completed,
        .gp_diff        = rcu_seq_diff,
        .exp_completed  = srcu_perf_completed,
        .async          = srcu_call_rcu,
        .gp_barrier     = srcu_rcu_barrier,
        .sync           = srcu_perf_synchronize,
        .exp_sync       = srcu_perf_synchronize_expedited,
        .name           = "srcud"
};
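
/*
 * The "srcu" and "srcud" variants exercise the same SRCU primitives; the
 * difference is that "srcu" uses the statically allocated srcu_ctl_perf
 * defined above, while "srcud" points srcu_ctlp at the dynamically
 * initialized srcud structure via init_srcu_struct()/cleanup_srcu_struct().
 */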

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
        return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
        .ptype          = RCU_TASKS_FLAVOR,
        .init           = rcu_sync_perf_init,
        .readlock       = tasks_perf_read_lock,
        .readunlock     = tasks_perf_read_unlock,
        .get_gp_seq     = rcu_no_completed,
        .gp_diff        = rcu_seq_diff,
        .async          = call_rcu_tasks,
        .gp_barrier     = rcu_barrier_tasks,
        .sync           = synchronize_rcu_tasks,
        .exp_sync       = synchronize_rcu_tasks,
        .name           = "tasks"
};

static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
        if (!cur_ops->gp_diff)
                return new - old;
        return cur_ops->gp_diff(new, old);
}

/*
 * If performance tests complete, wait for shutdown to commence.
 */
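/*
 * Both the reader and writer loops call this on every pass, so once the
 * last writer has finished taking measurements, all test kthreads end up
 * parked here until torture_must_stop(), keeping them from perturbing the
 * reporting of results.
 */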
static void rcu_perf_wait_shutdown(void)
{
        cond_resched_tasks_rcu_qs();
        if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
                return;
        while (!torture_must_stop())
                schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does empty RCU read-side critical
 * sections, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
        unsigned long flags;
        int idx;
        long me = (long)arg;

        VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        set_user_nice(current, MAX_NICE);
        atomic_inc(&n_rcu_perf_reader_started);

        do {
                local_irq_save(flags);
                idx = cur_ops->readlock();
                cur_ops->readunlock(idx);
                local_irq_restore(flags);
                rcu_perf_wait_shutdown();
        } while (!torture_must_stop());
        torture_kthread_stopping("rcu_perf_reader");
        return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
        atomic_dec(this_cpu_ptr(&n_async_inflight));
        kfree(rhp);
}

/*
 * RCU perf writer kthread.  Repeatedly forces a grace period and records
 * how long each one takes.
 */
static int
rcu_perf_writer(void *arg)
{
        int i = 0;
        int i_max;
        long me = (long)arg;
        struct rcu_head *rhp = NULL;
        struct sched_param sp;
        bool started = false, done = false, alldone = false;
        u64 t;
        u64 *wdp;
        u64 *wdpp = writer_durations[me];

        VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
        WARN_ON(!wdpp);
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        sp.sched_priority = 1;
        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

        if (holdoff)
                schedule_timeout_uninterruptible(holdoff * HZ);

        /*
         * Wait until rcu_end_inkernel_boot() is called for normal GP tests
         * so that RCU is not always expedited for normal GP tests.
         * The system_state test is approximate, but works well in practice.
         */
        while (!gp_exp && system_state != SYSTEM_RUNNING)
                schedule_timeout_uninterruptible(1);

        t = ktime_get_mono_fast_ns();
        if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
                t_rcu_perf_writer_started = t;
                if (gp_exp) {
                        b_rcu_perf_writer_started =
                                cur_ops->exp_completed() / 2;
                } else {
                        b_rcu_perf_writer_started = cur_ops->get_gp_seq();
                }
        }

        do {
                if (writer_holdoff)
                        udelay(writer_holdoff);
                wdp = &wdpp[i];
                *wdp = ktime_get_mono_fast_ns();
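                /*
                 * In gp_async mode, post a callback instead of waiting
                 * synchronously, but cap the number of callbacks in flight
                 * on this CPU at gp_async_max; when the cap is hit, drain
                 * everything with ->gp_barrier() and retry.
                 */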
                if (gp_async) {
retry:
                        if (!rhp)
                                rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
                        if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
                                atomic_inc(this_cpu_ptr(&n_async_inflight));
                                cur_ops->async(rhp, rcu_perf_async_cb);
                                rhp = NULL;
                        } else if (!kthread_should_stop()) {
                                cur_ops->gp_barrier();
                                goto retry;
                        } else {
                                kfree(rhp); /* Because we are stopping. */
                        }
                } else if (gp_exp) {
                        cur_ops->exp_sync();
                } else {
                        cur_ops->sync();
                }
                t = ktime_get_mono_fast_ns();
                *wdp = t - *wdp;
                i_max = i;
                if (!started &&
                    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
                        started = true;
                if (!done && i >= MIN_MEAS) {
                        done = true;
                        sp.sched_priority = 0;
                        sched_setscheduler_nocheck(current,
                                                   SCHED_NORMAL, &sp);
                        pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
                                 perf_type, PERF_FLAG, me, MIN_MEAS);
                        if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
                            nrealwriters) {
                                schedule_timeout_interruptible(10);
                                rcu_ftrace_dump(DUMP_ALL);
                                PERFOUT_STRING("Test complete");
                                t_rcu_perf_writer_finished = t;
                                if (gp_exp) {
                                        b_rcu_perf_writer_finished =
                                                cur_ops->exp_completed() / 2;
                                } else {
                                        b_rcu_perf_writer_finished =
                                                cur_ops->get_gp_seq();
                                }
                                if (shutdown) {
                                        smp_mb(); /* Assign before wake. */
                                        wake_up(&shutdown_wq);
                                }
                        }
                }
                if (done && !alldone &&
                    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
                        alldone = true;
                if (started && !alldone && i < MAX_MEAS - 1)
                        i++;
                rcu_perf_wait_shutdown();
        } while (!torture_must_stop());
        if (gp_async) {
                cur_ops->gp_barrier();
        }
        writer_n_durations[me] = i_max;
        torture_kthread_stopping("rcu_perf_writer");
        return 0;
}


static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
        pr_alert("%s" PERF_FLAG
                 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
                 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
        int i;
        int j;
        int ngps = 0;
        u64 *wdp;
        u64 *wdpp;

        /*
         * Would like warning at start, but everything is expedited
         * during the mid-boot phase, so have to wait till the end.
         */
        if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
                VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
        if (rcu_gp_is_normal() && gp_exp)
                VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
        if (gp_exp && gp_async)
                VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

        if (torture_cleanup_begin())
                return;
        if (!cur_ops) {
                torture_cleanup_end();
                return;
        }

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        torture_stop_kthread(rcu_perf_reader,
                                             reader_tasks[i]);
                kfree(reader_tasks);
        }

        if (writer_tasks) {
                for (i = 0; i < nrealwriters; i++) {
                        torture_stop_kthread(rcu_perf_writer,
                                             writer_tasks[i]);
                        if (!writer_n_durations)
                                continue;
                        j = writer_n_durations[i];
                        pr_alert("%s%s writer %d gps: %d\n",
                                 perf_type, PERF_FLAG, i, j);
                        ngps += j;
                }
                pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
                         perf_type, PERF_FLAG,
                         t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
                         t_rcu_perf_writer_finished -
                         t_rcu_perf_writer_started,
                         ngps,
                         rcuperf_seq_diff(b_rcu_perf_writer_finished,
                                          b_rcu_perf_writer_started));
                for (i = 0; i < nrealwriters; i++) {
                        if (!writer_durations)
                                break;
                        if (!writer_n_durations)
                                continue;
                        wdpp = writer_durations[i];
                        if (!wdpp)
                                continue;
                        for (j = 0; j <= writer_n_durations[i]; j++) {
                                wdp = &wdpp[j];
                                pr_alert("%s%s %4d writer-duration: %5d %llu\n",
                                        perf_type, PERF_FLAG,
                                        i, j, *wdp);
                                if (j % 100 == 0)
                                        schedule_timeout_uninterruptible(1);
                        }
                        kfree(writer_durations[i]);
                }
                kfree(writer_tasks);
                kfree(writer_durations);
                kfree(writer_n_durations);
        }

        /* Do torture-type-specific cleanup operations.  */
        if (cur_ops->cleanup != NULL)
                cur_ops->cleanup();

        torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If negative, return
 * num_online_cpus() + 1 + n, so that -1 means all online CPUs, -2 means
 * one fewer, and so on, but never less than one.
 */
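/*
 * Illustrative values (not from this file): with eight CPUs online,
 * compute_real(4) returns 4, compute_real(-1) returns 8, compute_real(-3)
 * returns 6, and compute_real(-100) is clamped to 1.
 */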
static int compute_real(int n)
{
        int nr;

        if (n >= 0) {
                nr = n;
        } else {
                nr = num_online_cpus() + 1 + n;
                if (nr <= 0)
                        nr = 1;
        }
        return nr;
}

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down the system.
 */
static int
rcu_perf_shutdown(void *arg)
{
        do {
                wait_event(shutdown_wq,
                           atomic_read(&n_rcu_perf_writer_finished) >=
                           nrealwriters);
        } while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
        smp_mb(); /* Wake before output. */
        rcu_perf_cleanup();
        kernel_power_off();
        return -EINVAL;
}

static int __init
rcu_perf_init(void)
{
        long i;
        int firsterr = 0;
        static struct rcu_perf_ops *perf_ops[] = {
                &rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
        };

        if (!torture_init_begin(perf_type, verbose))
                return -EBUSY;

        /* Process args and tell the world that the perf'er is on the job. */
        for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
                cur_ops = perf_ops[i];
                if (strcmp(perf_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(perf_ops)) {
                pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
                pr_alert("rcu-perf types:");
                for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
                        pr_cont(" %s", perf_ops[i]->name);
                pr_cont("\n");
                WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
                firsterr = -EINVAL;
                cur_ops = NULL;
                goto unwind;
        }
        if (cur_ops->init)
                cur_ops->init();

        nrealwriters = compute_real(nwriters);
        nrealreaders = compute_real(nreaders);
        atomic_set(&n_rcu_perf_reader_started, 0);
        atomic_set(&n_rcu_perf_writer_started, 0);
        atomic_set(&n_rcu_perf_writer_finished, 0);
        rcu_perf_print_module_parms(cur_ops, "Start of test");

        /* Start up the kthreads. */

        if (shutdown) {
                init_waitqueue_head(&shutdown_wq);
                firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
                                                  shutdown_task);
                if (firsterr)
                        goto unwind;
                schedule_timeout_uninterruptible(1);
        }
        reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_PERFOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
                                                  reader_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
        while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
                schedule_timeout_uninterruptible(1);
        writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
                                   GFP_KERNEL);
        writer_n_durations =
                kcalloc(nrealwriters, sizeof(*writer_n_durations),
                        GFP_KERNEL);
        if (!writer_tasks || !writer_durations || !writer_n_durations) {
                VERBOSE_PERFOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealwriters; i++) {
                writer_durations[i] =
                        kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
                                GFP_KERNEL);
                if (!writer_durations[i]) {
                        firsterr = -ENOMEM;
                        goto unwind;
                }
                firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
                                                  writer_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        rcu_perf_cleanup();
        return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);