/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
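/*
 * Example usage (illustrative only; Documentation/RCU/torture.txt is the
 * authoritative reference for the module parameters defined below):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
 *	... let the test run for a while ...
 *	rmmod rcutorture
 *
 * Results appear in the console log via printk(); look for a line
 * reading "End of test: SUCCESS" or "End of test: FAILURE".
 */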
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");
static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static bool verbose;		/* Print more debug info. */
static bool test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec) */
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static int irqreader = 1;	/* RCU readers from irq (timers). */
static int fqs_duration;	/* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff;		/* Hold time within burst (us). */
static int fqs_stutter = 3;	/* Wait time between bursts (s). */
static int onoff_interval;	/* Wait time between CPU hotplugs, 0=disable. */
static int onoff_holdoff;	/* Seconds after boot before CPU hotplugs. */
static int shutdown_secs;	/* Shutdown time (s).  <=0 for no shutdown. */
static int test_boost = 1;	/* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */
module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0644);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(onoff_interval, int, 0444);
MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
module_param(onoff_holdoff, int, 0444);
MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)");
module_param(shutdown_secs, int, 0444);
MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
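/*
 * Note that when rcutorture is built into the kernel rather than loaded
 * as a module, the parameters above may instead be supplied on the boot
 * command line with the usual module-name prefix, for example
 * "rcutorture.torture_type=rcu_bh" (illustrative).
 */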
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *shutdown_task;
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *onoff_task;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_offline_attempts;
static long n_offline_successes;
static long n_online_attempts;
static long n_online_successes;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;
#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
module_param(rcutorture_runnable, int, 0444);
MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
static unsigned long shutdown_time;	/* jiffies to system shutdown. */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
					/*  and boost task create/destroy. */

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
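/*
 * Sketch of the resulting state machine: fullstop starts out as
 * FULLSTOP_RMMOD, becomes FULLSTOP_DONTSTOP once rcu_torture_init() has
 * spawned its kthreads, and then transitions either back to
 * FULLSTOP_RMMOD in rcu_torture_cleanup() or to FULLSTOP_SHUTDOWN in
 * rcutorture_shutdown_notify().  The kthreads poll it and park
 * themselves in rcutorture_shutdown_absorb() on system shutdown.
 */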
/*
 * Protect fullstop transitions and spawning of kthreads.
 */
static DEFINE_MUTEX(fullstop_mutex);
/* Forward reference. */
static void rcu_torture_cleanup(void);

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}
/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		printk(KERN_NOTICE
		       "rcutorture thread %s parking due to system shutdown\n",
		       title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state += (unsigned long)local_clock();
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
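/*
 * Typical use of the above (illustrative): declare per-thread state with
 * DEFINE_RCU_RANDOM(rand) and reduce the result modulo the desired range,
 * as in "rcu_random(&rand) % (nrealreaders * 2)".  The swahw32() word
 * swap is presumably applied because the low-order bits of a linear
 * congruential generator are its least random ones.
 */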
static void
rcu_stutter_wait(char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*cb_barrier)(void);
	void (*fqs)(void);
	int (*stats)(char *page);
	int irq_capable;
	int can_boost;
	char *name;
};

static struct rcu_torture_ops *cur_ops;
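/*
 * Illustrative read-side pattern used throughout this file: readers go
 * through the ops vector rather than calling any one RCU flavor directly:
 *
 *	idx = cur_ops->readlock();
 *	... read-side critical section, cur_ops->read_delay(&rand) ...
 *	cur_ops->readunlock(idx);
 *
 * Only the SRCU variants make use of the returned index; the other
 * flavors return zero and ignore it.
 */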
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferred_free(rp);
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops rcu_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_sync"
};

static struct rcu_torture_ops rcu_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_expedited"
};
/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops rcu_bh_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_sync"
};

static struct rcu_torture_ops rcu_bh_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_bh_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_expedited"
};
/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}
static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}
static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}
static struct rcu_torture_ops srcu_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};
static int srcu_torture_read_lock_raw(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock_raw(&srcu_ctl);
}

static void srcu_torture_read_unlock_raw(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock_raw(&srcu_ctl, idx);
}

static struct rcu_torture_ops srcu_raw_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock_raw,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock_raw,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_raw"
};
static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_expedited_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize_expedited,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_expedited"
};
/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

static struct rcu_torture_ops sched_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.name		= "sched_sync"
};

static struct rcu_torture_ops sched_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched_expedited"
};
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */
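/*
 * Worked example of the failure criterion used below: with the default
 * test_boost_duration=4 and HZ=1000, a callback posted at the start of a
 * boost interval must be invoked within 4*HZ - HZ/2 = 3500 jiffies (3.5
 * seconds), or n_rcu_torture_boost_failure is incremented.
 */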
struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
	rbip->inflight = 0;
}
static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_PRINTK_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_uninterruptible(1);
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!rbi.inflight) {
				smp_mb(); /* RCU core before ->inflight = 1. */
				rbi.inflight = 1;
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			cond_resched();
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	rcu_stutter_wait("rcu_torture_boost");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	/* Clean up and exit. */
	VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
	rcutorture_shutdown_absorb("rcu_torture_boost");
	while (!kthread_should_stop() || rbi.inflight)
		schedule_timeout_uninterruptible(1);
	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
	destroy_rcu_head_on_stack(&rbi.rcu);
	return 0;
}
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
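/*
 * Illustrative timing, in terms of the module parameters above: sleep
 * fqs_stutter seconds (default 3), then call cur_ops->fqs() repeatedly
 * for fqs_duration microseconds with fqs_holdoff microseconds between
 * calls, then repeat.  A zero fqs_duration means this kthread is never
 * created in the first place (see rcu_torture_init()).
 */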
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
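/*
 * Sketch of the pipeline, given the RCU_TORTURE_PIPE_LEN of 10 above:
 * each element removed from rcu_torture_current has its rtort_pipe_count
 * incremented once per grace period until it reaches 10, at which point
 * it goes back on the freelist.  Readers bin the rtort_pipe_count values
 * they observe; with a correct RCU, all observations should land in
 * buckets 0 and 1, since an element must become unreachable to readers
 * after a single grace period.
 */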
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferred_free(old_rp);
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		oldbatch = cur_ops->completed();
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
void rcutorture_trace_dump(void)
{
	static atomic_t beenhere = ATOMIC_INIT(0);

	if (atomic_read(&beenhere))
		return;
	if (atomic_xchg(&beenhere, 1) != 0)
		return;
	do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
	ftrace_dump(DUMP_ALL);
}
/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
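/*
 * Note on the rcu_dereference_check() condition used below: because one
 * reader function serves several RCU flavors selected at module-load
 * time, the lockdep expression adds the BH, sched, and SRCU read-side
 * states to the implicit rcu_read_lock_held() check that
 * rcu_dereference_check() already performs.  cur_ops->readlock() has
 * already been called, so at least one of these holds for the flavor
 * under test.
 */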
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	static DEFINE_RCU_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	if (pipe_count > 1)
		rcutorture_trace_dump();
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = cur_ops->completed() - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		if (pipe_count > 1)
			rcutorture_trace_dump();
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	rcutorture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d rtbke: %ld rtbre: %ld "
		       "rtbf: %ld rtb: %ld nt: %ld "
		       "onoff: %ld/%ld:%ld/%ld",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_boost_ktrerror,
		       n_rcu_torture_boost_rterror,
		       n_rcu_torture_boost_failure,
		       n_rcu_torture_boosts,
		       n_rcu_torture_timers,
		       n_online_successes,
		       n_online_attempts,
		       n_offline_successes,
		       n_offline_attempts);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		rcutorture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}
static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
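/*
 * For example, on a 4-CPU system rcu_idle_cpu cycles through 3, 2, 1, 0,
 * -1, 3, ... (see the decrement at the end of rcu_torture_shuffle_tasks()),
 * so each CPU periodically gets a shuffle_interval-second window with no
 * torture tasks bound to it, allowing CONFIG_NO_HZ to stop its
 * scheduling-clock tick.
 */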
static void rcu_torture_shuffle_tasks(void)
{
	int i;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	set_cpus_allowed_ptr(current, shuffle_tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     shuffle_tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     shuffle_tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
		rcutorture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}
/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		rcutorture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}
static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
	       "--- %s: nreaders=%d nfakewriters=%d "
	       "stat_interval=%d verbose=%d test_no_idle_hz=%d "
	       "shuffle_interval=%d stutter=%d irqreader=%d "
	       "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
	       "test_boost=%d/%d test_boost_interval=%d "
	       "test_boost_duration=%d shutdown_secs=%d "
	       "onoff_interval=%d onoff_holdoff=%d\n",
	       torture_type, tag, nrealreaders, nfakewriters,
	       stat_interval, verbose, test_no_idle_hz, shuffle_interval,
	       stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
	       test_boost, cur_ops->can_boost,
	       test_boost_interval, test_boost_duration, shutdown_secs,
	       onoff_interval, onoff_holdoff);
}
static struct notifier_block rcutorture_shutdown_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};
static void rcutorture_booster_cleanup(int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return;
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	kthread_stop(t);
}
static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}
/*
 * Cause the rcutorture test to shutdown the system after the test has
 * run for the time specified by the shutdown_secs module parameter.
 */
static int
rcu_torture_shutdown(void *arg)
{
	long delta;
	unsigned long jiffies_snap;

	VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
	jiffies_snap = ACCESS_ONCE(jiffies);
	while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
	       !kthread_should_stop()) {
		delta = shutdown_time - jiffies_snap;
		if (verbose)
			printk(KERN_ALERT "%s" TORTURE_FLAG
			       "rcu_torture_shutdown task: %lu "
			       "jiffies remaining\n",
			       torture_type, delta);
		schedule_timeout_interruptible(delta);
		jiffies_snap = ACCESS_ONCE(jiffies);
	}
	if (kthread_should_stop()) {
		VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	rcu_torture_cleanup();	/* Get the success/failure message. */
	kernel_power_off();	/* Shut down the system. */
	return -EINVAL;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int __cpuinit
rcu_torture_onoff(void *arg)
{
	int cpu;
	int maxcpu = -1;
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (onoff_holdoff > 0) {
		VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff * HZ);
		VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff");
	}
	while (!kthread_should_stop()) {
		cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
		if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
			if (verbose)
				printk(KERN_ALERT "%s" TORTURE_FLAG
				       "rcu_torture_onoff task: offlining %d\n",
				       torture_type, cpu);
			n_offline_attempts++;
			if (cpu_down(cpu) == 0) {
				if (verbose)
					printk(KERN_ALERT "%s" TORTURE_FLAG
					       "rcu_torture_onoff task: "
					       "offlined %d\n",
					       torture_type, cpu);
				n_offline_successes++;
			}
		} else if (cpu_is_hotpluggable(cpu)) {
			if (verbose)
				printk(KERN_ALERT "%s" TORTURE_FLAG
				       "rcu_torture_onoff task: onlining %d\n",
				       torture_type, cpu);
			n_online_attempts++;
			if (cpu_up(cpu) == 0) {
				if (verbose)
					printk(KERN_ALERT "%s" TORTURE_FLAG
					       "rcu_torture_onoff task: "
					       "onlined %d\n",
					       torture_type, cpu);
				n_online_successes++;
			}
		}
		schedule_timeout_interruptible(onoff_interval * HZ);
	}
	VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
	return 0;
}
static int __cpuinit
rcu_torture_onoff_init(void)
{
	if (onoff_interval <= 0)
		return 0;
	onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
	if (IS_ERR(onoff_task)) {
		int ret = PTR_ERR(onoff_task);

		onoff_task = NULL;
		return ret;
	}
	return 0;
}

static void rcu_torture_onoff_cleanup(void)
{
	if (onoff_task == NULL)
		return;
	VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

static int
rcu_torture_onoff_init(void)
{
	return 0;
}

static void rcu_torture_onoff_cleanup(void)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};
static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	rcutorture_record_test_transition();
	if (fullstop == FULLSTOP_SHUTDOWN) {
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_shutdown_nb);
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}
	if (shutdown_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	rcu_torture_onoff_cleanup();

	/* Wait for all RCU callbacks to fire. */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (n_online_successes != n_online_attempts ||
		 n_offline_successes != n_offline_attempts)
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}
static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
		  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
		  &srcu_ops, &srcu_raw_ops, &srcu_expedited_ops,
		  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
		       torture_type);
		printk(KERN_ALERT "rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			printk(KERN_ALERT " %s", torture_ops[i]->name);
		printk(KERN_ALERT "\n");
		mutex_unlock(&fullstop_mutex);
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
				  "fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
			firsterr = -ENOMEM;
			VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
			goto unwind;
		}

		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			free_cpumask_var(shuffle_tmp_mask);
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		/* Create the stutter thread */
		stutter_task = kthread_run(rcu_torture_stutter, NULL,
					   "rcu_torture_stutter");
		if (IS_ERR(stutter_task)) {
			firsterr = PTR_ERR(stutter_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
			stutter_task = NULL;
			goto unwind;
		}
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		int retval;

		boost_starttime = jiffies + test_boost_interval * HZ;
		register_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i) {
			if (cpu_is_offline(i))
				continue;  /* Heuristic: CPU can go offline. */
			retval = rcutorture_booster_init(i);
			if (retval < 0) {
				firsterr = retval;
				goto unwind;
			}
		}
	}
	if (shutdown_secs > 0) {
		shutdown_time = jiffies + shutdown_secs * HZ;
		shutdown_task = kthread_run(rcu_torture_shutdown, NULL,
					    "rcu_torture_shutdown");
		if (IS_ERR(shutdown_task)) {
			firsterr = PTR_ERR(shutdown_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
			shutdown_task = NULL;
			goto unwind;
		}
	}
	rcu_torture_onoff_init();
	register_reboot_notifier(&rcutorture_shutdown_nb);
	rcutorture_record_test_transition();
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}
module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);