/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");
static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static int irqreader = 1;	/* RCU readers from irq (timers). */
static int fqs_duration = 0;	/* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff = 0;	/* Hold time within burst (us). */
static int fqs_stutter = 3;	/* Wait time between bursts (s). */
static int test_boost = 1;	/* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */
module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0644);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
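
/*
 * Example invocation (hypothetical values, merely illustrating the
 * parameters declared above):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
 *
 * See Documentation/RCU/torture.txt for full parameter descriptions.
 */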
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;
static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;

/*
 * Protect fullstop transitions and spawning of kthreads.
 */
static DEFINE_MUTEX(fullstop_mutex);
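
/*
 * Lifecycle of fullstop, as implemented below: the module starts out
 * as FULLSTOP_RMMOD, moves to FULLSTOP_DONTSTOP once rcu_torture_init()
 * is committed to spawning kthreads, and then transitions either back
 * to FULLSTOP_RMMOD (normal module removal) or to FULLSTOP_SHUTDOWN
 * (the reboot notifier fired while the test was running).  The kthreads
 * poll this variable so that they can park themselves on shutdown.
 */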
/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}
/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		printk(KERN_NOTICE
		       "rcutorture thread %s parking due to system shutdown\n",
		       title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state += (unsigned long)local_clock();
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
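
/*
 * Note on rcu_random(): the low-order bits of a linear congruential
 * generator are its weakest, so the swahw32() half-word swap moves the
 * better-mixed high-order bits into the positions that callers most
 * often consume via "%" and "&" operations.
 */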
static void
rcu_stutter_wait(char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*cb_barrier)(void);
	void (*fqs)(void);
	int (*stats)(char *page);
	int irq_capable;
	int can_boost;
	char *name;
};

static struct rcu_torture_ops *cur_ops;
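
/*
 * cur_ops is written exactly once, in rcu_torture_init(), which scans
 * torture_ops[] for the entry whose ->name matches the torture_type
 * module parameter; all subsequent reader/writer kthreads dispatch
 * through it without further synchronization.
 */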
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferred_free(rp);
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops rcu_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_sync"
};

static struct rcu_torture_ops rcu_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_expedited"
};
/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};
static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_sync"
};

static struct rcu_torture_ops rcu_bh_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_bh_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_expedited"
};
/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}
static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}
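
/*
 * Illustrative (made-up) output from srcu_torture_stats() on a two-CPU
 * system, showing each CPU's counts for the inactive and active rank
 * of SRCU per-CPU counters:
 *
 *	srcu-torture: per-CPU(idx=0): 0(0,1) 1(0,0)
 */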
static struct rcu_torture_ops srcu_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_expedited_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize_expedited,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_expedited"
};
/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};
static struct rcu_torture_ops sched_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.name		= "sched_sync"
};

static struct rcu_torture_ops sched_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.name		= "sched_expedited"
};
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
	rbip->inflight = 0;
}
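
/*
 * The ->inflight flag is a simple single-producer/single-consumer
 * handshake: rcu_torture_boost() sets it to 1 just before posting the
 * callback and polls until rcu_torture_boost_cb() clears it.  The
 * paired smp_mb() calls keep the callback's RCU-core accesses from
 * being reordered past the flag update in either direction.
 */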
static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_PRINTK_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
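		/*
		 * Note: "jiffies - x > ULONG_MAX / 2" below is a
		 * wraparound-safe open-coding of time_before(jiffies, x):
		 * the unsigned difference is huge exactly when jiffies
		 * has not yet reached x.
		 */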
		while (jiffies - oldstarttime > ULONG_MAX / 2) {
			schedule_timeout_uninterruptible(1);
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (jiffies - endtime > ULONG_MAX / 2) {
			/* If we don't have a callback in flight, post one. */
			if (!rbi.inflight) {
				smp_mb(); /* RCU core before ->inflight = 1. */
				rbi.inflight = 1;
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			cond_resched();
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	rcu_stutter_wait("rcu_torture_boost");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	/* Clean up and exit. */
	VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
	rcutorture_shutdown_absorb("rcu_torture_boost");
	while (!kthread_should_stop() || rbi.inflight)
		schedule_timeout_uninterruptible(1);
	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
	destroy_rcu_head_on_stack(&rbi.rcu);
	return 0;
}
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (jiffies - fqs_resume_time > LONG_MAX) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferred_free(old_rp);
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		oldbatch = cur_ops->completed();
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
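
/*
 * Worked example of the pipeline: with RCU_TORTURE_PIPE_LEN = 10, each
 * rcu_torture element posted by the writer passes through roughly ten
 * grace periods (its ->rtort_pipe_count advancing by one per pass)
 * before returning to the freelist.  A reader should therefore only
 * ever observe the current element (pipe_count 0) or the just-replaced
 * one (pipe_count 1); any higher count means a grace period ended too
 * soon, and is flagged in the statistics.
 */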
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	static DEFINE_RCU_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = cur_ops->completed() - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}
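
/*
 * Note that rcu_torture_timer() runs from softirq context, so only
 * flavors that mark themselves ->irq_capable in their ops vector are
 * exercised this way (srcu, for example, opts out).
 */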
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	rcutorture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d rtbke: %ld rtbre: %ld "
		       "rtbf: %ld rtb: %ld nt: %ld",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_boost_ktrerror,
		       n_rcu_torture_boost_rterror,
		       n_rcu_torture_boost_failure,
		       n_rcu_torture_boosts,
		       n_rcu_torture_timers);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}
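
/*
 * Illustrative (made-up numbers) first line of the message built above:
 *
 *	rcu-torture: rtc: c0a0b0c0 ver: 1916 tfle: 0 rta: 1916 rtaf: 0
 *	rtf: 1915 rtmbe: 0 rtbke: 0 rtbre: 0 rtbf: 0 rtb: 0 nt: 2716
 *
 * A non-zero rtmbe count (or any " !!!" marker) indicates a test
 * failure; see Documentation/RCU/torture.txt for each field's meaning.
 */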
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		rcutorture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}
static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	int i;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	set_cpus_allowed_ptr(current, shuffle_tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     shuffle_tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     shuffle_tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}
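
/*
 * The net effect of the rotation above: rcu_idle_cpu counts down from
 * the highest-numbered online CPU through 0 to -1 (no idle CPU, all
 * tasks unrestricted) and then wraps, so every CPU periodically gets a
 * shuffle interval with no torture tasks bound to it.
 */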
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
		rcutorture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		rcutorture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}
static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
	       "--- %s: nreaders=%d nfakewriters=%d "
	       "stat_interval=%d verbose=%d test_no_idle_hz=%d "
	       "shuffle_interval=%d stutter=%d irqreader=%d "
	       "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
	       "test_boost=%d/%d test_boost_interval=%d "
	       "test_boost_duration=%d\n",
	       torture_type, tag, nrealreaders, nfakewriters,
	       stat_interval, verbose, test_no_idle_hz, shuffle_interval,
	       stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
	       test_boost, cur_ops->can_boost,
	       test_boost_interval, test_boost_duration);
}

static struct notifier_block rcutorture_shutdown_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};
static void rcutorture_booster_cleanup(int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return;
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	kthread_stop(t);
	boost_tasks[cpu] = NULL;
}

static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}
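
/*
 * The create/bind/wake sequence above is the standard pattern for a
 * per-CPU kthread: kthread_create_on_node() leaves the new task
 * stopped, kthread_bind() pins it to its CPU before it ever runs, and
 * only then does wake_up_process() let it loose.
 */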
static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};
static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	rcutorture_record_test_transition();
	if (fullstop == FULLSTOP_SHUTDOWN) {
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_shutdown_nb);
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}

	/* Wait for all RCU callbacks to fire. */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}
static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
		  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
		  &srcu_ops, &srcu_expedited_ops,
		  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
		       torture_type);
		printk(KERN_ALERT "rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			printk(KERN_ALERT " %s", torture_ops[i]->name);
		printk(KERN_ALERT "\n");
		mutex_unlock(&fullstop_mutex);
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
				  "fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
			firsterr = -ENOMEM;
			VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
			goto unwind;
		}

		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			free_cpumask_var(shuffle_tmp_mask);
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		/* Create the stutter thread */
		stutter_task = kthread_run(rcu_torture_stutter, NULL,
					   "rcu_torture_stutter");
		if (IS_ERR(stutter_task)) {
			firsterr = PTR_ERR(stutter_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
			stutter_task = NULL;
			goto unwind;
		}
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		int retval;

		boost_starttime = jiffies + test_boost_interval * HZ;
		register_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i) {
			if (cpu_is_offline(i))
				continue;  /* Heuristic: CPU can go offline. */
			retval = rcutorture_booster_init(i);
			if (retval < 0) {
				firsterr = retval;
				goto unwind;
			}
		}
	}
	register_reboot_notifier(&rcutorture_shutdown_nb);
	rcutorture_record_test_transition();
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}
module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);