/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec) */
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static int irqreader = 1;	/* RCU readers from irq (timers). */
static int fqs_duration;	/* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff;		/* Hold time within burst (us). */
static int fqs_stutter = 3;	/* Wait time between bursts (s). */
static int shutdown_secs;	/* Shutdown time (s).  <= 0 for no shutdown. */
static int test_boost = 1;	/* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0644);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(shutdown_secs, int, 0444);
MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
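
/*
 * Illustrative usage (not from the original source; the parameter values
 * below are arbitrary examples): the test is driven entirely by these
 * module parameters, so a typical run looks like
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *	... let the test run ...
 *	rmmod rcutorture
 *	dmesg | grep torture:
 *
 * with the success/failure verdict appearing in the console log at rmmod
 * time.  See Documentation/RCU/torture.txt for the full details.
 */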

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *shutdown_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
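
/*
 * Each rcu_torture element cycles from the freelist, to the readers'
 * rcu_torture_current pointer, and then through RCU_TORTURE_PIPE_LEN
 * grace periods (the "pipeline") before returning to the freelist.
 * rtort_pipe_count records how far down the pipeline an element has
 * progressed; a reader should only ever observe values 0 or 1, since
 * anything larger means the element survived more than one grace
 * period after being replaced, that is, a broken RCU implementation.
 */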

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

static unsigned long shutdown_time;	/* jiffies to system shutdown. */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
					/*  and boost task create/destroy. */

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
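
/*
 * State progression (summarizing the code that follows): fullstop begins
 * as FULLSTOP_RMMOD, becomes FULLSTOP_DONTSTOP once rcu_torture_init()
 * has the test running, switches to FULLSTOP_SHUTDOWN if the reboot
 * notifier fires mid-test, and reverts to FULLSTOP_RMMOD in
 * rcu_torture_cleanup().  All transitions occur under fullstop_mutex.
 */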

/*
 * Protect fullstop transitions and spawning of kthreads.
 */
static DEFINE_MUTEX(fullstop_mutex);

/* Forward reference. */
static void rcu_torture_cleanup(void);

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		printk(KERN_NOTICE
		       "rcutorture thread %s parking due to system shutdown\n",
		       title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state += (unsigned long)local_clock();
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
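
/*
 * Illustrative use (not part of the original source): each kthread keeps
 * its own rcu_random_state, so no locking is needed unless the state is
 * shared (as it is in rcu_torture_timer() below), and callers mask the
 * result down to the range they need.  For example, the writer kthreads
 * delay for a pseudo-random 0-1023 microseconds via
 *
 *	static DEFINE_RCU_RANDOM(rand);
 *
 *	udelay(rcu_random(&rand) & 0x3ff);
 */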

static void
rcu_stutter_wait(char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*cb_barrier)(void);
	void (*fqs)(void);
	int (*stats)(char *page);
	int irq_capable;
	int can_boost;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
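
/*
 * rcu_torture_init() selects the vector whose ->name matches the
 * torture_type module parameter, so supporting a new RCU flavor is a
 * matter of supplying flavor-specific hooks plus one more instance of
 * this structure.  A hypothetical minimal entry (the "foo" name and
 * hooks below are made up for illustration) might look like:
 *
 *	static struct rcu_torture_ops foo_ops = {
 *		.readlock	= foo_torture_read_lock,
 *		.read_delay	= rcu_read_delay,
 *		.readunlock	= foo_torture_read_unlock,
 *		.completed	= rcu_no_completed,
 *		.deferred_free	= rcu_sync_torture_deferred_free,
 *		.sync		= synchronize_foo,
 *		.name		= "foo"
 *	};
 *
 * together with an entry in rcu_torture_init()'s torture_ops[] array.
 * Hooks left NULL (init, cleanup, cb_barrier, fqs, stats) are checked
 * before use; the hooks shown above are invoked unconditionally.
 */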

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}
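
/*
 * Duty-cycle check, worked out from the expressions above: with the
 * default nrealreaders = 2 * num_online_cpus(), a reader takes the
 * 50-millisecond mdelay() path about once per nrealreaders * 100,000
 * calls and the 200-microsecond udelay() path about once per
 * nrealreaders * 400 calls, so long-running readers exist but stay
 * rare enough not to throttle the test as a whole.
 */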

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferred_free(rp);
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_sync"
};

static struct rcu_torture_ops rcu_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_expedited"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_sync"
};

static struct rcu_torture_ops rcu_bh_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_bh_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_expedited"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}

static struct rcu_torture_ops srcu_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_expedited_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize_expedited,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_expedited"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

static struct rcu_torture_ops sched_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.name		= "sched_sync"
};

static struct rcu_torture_ops sched_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched_expedited"
};

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
	rbip->inflight = 0;
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_PRINTK_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_uninterruptible(1);
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!rbi.inflight) {
				smp_mb(); /* RCU core before ->inflight = 1. */
				rbi.inflight = 1;
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			cond_resched();
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	rcu_stutter_wait("rcu_torture_boost");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	/* Clean up and exit. */
	VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
	rcutorture_shutdown_absorb("rcu_torture_boost");
	while (!kthread_should_stop() || rbi.inflight)
		schedule_timeout_uninterruptible(1);
	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
	destroy_rcu_head_on_stack(&rbi.rcu);
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferred_free(old_rp);
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		oldbatch = cur_ops->completed();
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

void rcutorture_trace_dump(void)
{
	static atomic_t beenhere = ATOMIC_INIT(0);

	if (atomic_read(&beenhere))
		return;
	if (atomic_xchg(&beenhere, 1) != 0)
		return;
	do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
	ftrace_dump(DUMP_ALL);
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	static DEFINE_RCU_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	if (pipe_count > 1)
		rcutorture_trace_dump();
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = cur_ops->completed() - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		if (pipe_count > 1)
			rcutorture_trace_dump();
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		cond_resched();
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	rcutorture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d rtbke: %ld rtbre: %ld "
		       "rtbf: %ld rtb: %ld nt: %ld",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_boost_ktrerror,
		       n_rcu_torture_boost_rterror,
		       n_rcu_torture_boost_failure,
		       n_rcu_torture_boosts,
		       n_rcu_torture_timers);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		rcutorture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	int i;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	set_cpus_allowed_ptr(current, shuffle_tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     shuffle_tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     shuffle_tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
		rcutorture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		rcutorture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
	       "--- %s: nreaders=%d nfakewriters=%d "
	       "stat_interval=%d verbose=%d test_no_idle_hz=%d "
	       "shuffle_interval=%d stutter=%d irqreader=%d "
	       "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
	       "test_boost=%d/%d test_boost_interval=%d "
	       "test_boost_duration=%d shutdown_secs=%d\n",
	       torture_type, tag, nrealreaders, nfakewriters,
	       stat_interval, verbose, test_no_idle_hz, shuffle_interval,
	       stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
	       test_boost, cur_ops->can_boost,
	       test_boost_interval, test_boost_duration, shutdown_secs);
}

static struct notifier_block rcutorture_shutdown_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};

static void rcutorture_booster_cleanup(int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return;
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	kthread_stop(t);
	boost_tasks[cpu] = NULL;
}

static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * Cause the rcutorture test to shutdown the system after the test has
 * run for the time specified by the shutdown_secs module parameter.
 */
static int
rcu_torture_shutdown(void *arg)
{
	long delta;
	unsigned long jiffies_snap;

	VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
	jiffies_snap = ACCESS_ONCE(jiffies);
	while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
	       !kthread_should_stop()) {
		delta = shutdown_time - jiffies_snap;
		if (verbose)
			printk(KERN_ALERT "%s" TORTURE_FLAG
			       "rcu_torture_shutdown task: %lu "
			       "jiffies remaining\n",
			       torture_type, delta);
		schedule_timeout_interruptible(delta);
		jiffies_snap = ACCESS_ONCE(jiffies);
	}
	if (ULONG_CMP_LT(jiffies, shutdown_time)) {
		VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	rcu_torture_cleanup();	/* Get the success/failure message. */
	kernel_power_off();	/* Shut down the system. */
	return -EINVAL;
}

static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};

static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	rcutorture_record_test_transition();
	if (fullstop == FULLSTOP_SHUTDOWN) {
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_shutdown_nb);
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}
	if (shutdown_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;

	/* Wait for all RCU callbacks to fire. */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}

static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
		  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
		  &srcu_ops, &srcu_expedited_ops,
		  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
		       torture_type);
		printk(KERN_ALERT "rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			printk(KERN_ALERT " %s", torture_ops[i]->name);
		printk(KERN_ALERT "\n");
		mutex_unlock(&fullstop_mutex);
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
		       "fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
			firsterr = -ENOMEM;
			VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
			goto unwind;
		}

		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			free_cpumask_var(shuffle_tmp_mask);
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}

	if (stutter < 0)
		stutter = 0;
	/* Create the stutter thread */
	stutter_task = kthread_run(rcu_torture_stutter, NULL,
				   "rcu_torture_stutter");
	if (IS_ERR(stutter_task)) {
		firsterr = PTR_ERR(stutter_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
		stutter_task = NULL;
		goto unwind;
	}

	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		int retval;

		boost_starttime = jiffies + test_boost_interval * HZ;
		register_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i) {
			if (cpu_is_offline(i))
				continue;  /* Heuristic: CPU can go offline. */
			retval = rcutorture_booster_init(i);
			if (retval < 0) {
				firsterr = retval;
				goto unwind;
			}
		}
	}
	if (shutdown_secs > 0) {
		shutdown_time = jiffies + shutdown_secs * HZ;
		shutdown_task = kthread_run(rcu_torture_shutdown, NULL,
					    "rcu_torture_shutdown");
		if (IS_ERR(shutdown_task)) {
			firsterr = PTR_ERR(shutdown_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
			shutdown_task = NULL;
			goto unwind;
		}
	}
	register_reboot_notifier(&rcutorture_shutdown_nb);
	rcutorture_record_test_transition();
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);