/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Josh Triplett <josh@freedesktop.org>
 *
 * See also: Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
              "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;       /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;    /* # fake writer threads */
static int stat_interval;       /* Interval between stats, in seconds. */
                                /*  Defaults to "only at end of test". */
static int verbose;             /* Print more debug info. */
static int test_no_idle_hz;     /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec) */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
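
/*
 * Illustrative usage (not part of this file): when built as a module, the
 * parameters above are supplied on the modprobe/insmod command line, e.g.
 *
 *     modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
 *
 * See Documentation/RCU/torture.txt for the authoritative description.
 */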

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
        do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
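
/*
 * Example of the resulting console output (illustrative): with
 * torture_type="rcu", VERBOSE_PRINTK_STRING("rcu_torture_writer task started")
 * expands to a KERN_ALERT line reading
 * "rcu-torture:rcu_torture_writer task started".
 */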

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
        struct rcu_head rtort_rcu;
        int rtort_pipe_count;
        struct list_head rtort_free;
        int rtort_mbtest;
};

static int fullstop = 0;        /* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
        { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
        { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static struct list_head rcu_torture_removed;

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
        struct list_head *p;

        spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
                atomic_inc(&n_rcu_torture_alloc_fail);
                spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
        atomic_inc(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
        spin_unlock_bh(&rcu_torture_lock);
        return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
        atomic_inc(&n_rcu_torture_free);
        spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
        spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
        unsigned long rrs_state;
        long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
        if (--rrsp->rrs_count < 0) {
                rrsp->rrs_state +=
                        (unsigned long)cpu_clock(raw_smp_processor_id());
                rrsp->rrs_count = RCU_RANDOM_REFRESH;
        }
        rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
        return swahw32(rrsp->rrs_state);
}
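
/*
 * Typical use within this file (illustrative): each kthread keeps its own
 * generator state, e.g. "DEFINE_RCU_RANDOM(rand);", and then derives bounded
 * pseudo-random delays with expressions such as "rcu_random(&rand) % n" or
 * "rcu_random(&rand) & 0x3ff".
 */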

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readdelay)(struct rcu_random_state *rrsp);
        void (*readunlock)(int idx);
        int (*completed)(void);
        void (*deferredfree)(struct rcu_torture *p);
        void (*sync)(void);
        void (*cb_barrier)(void);
        int (*stats)(char *page);
        char *name;
};
static struct rcu_torture_ops *cur_ops = NULL;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
        long delay;
        const long longdelay = 200;

        /* We want there to be long-running readers, but not all the time. */

        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
        if (!delay)
                udelay(longdelay);
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
        return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
        int i;
        struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

        if (fullstop) {
                /* Test is ending, just drop callbacks on the floor. */
                /* The next initialization will pick up the pieces. */
                return;
        }
        i = rp->rtort_pipe_count;
        if (i > RCU_TORTURE_PIPE_LEN)
                i = RCU_TORTURE_PIPE_LEN;
        atomic_inc(&rcu_torture_wcount[i]);
        if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                rp->rtort_mbtest = 0;
                rcu_torture_free(rp);
        } else
                cur_ops->deferredfree(rp);
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
        .init = NULL,
        .cleanup = NULL,
        .readlock = rcu_torture_read_lock,
        .readdelay = rcu_read_delay,
        .readunlock = rcu_torture_read_unlock,
        .completed = rcu_torture_completed,
        .deferredfree = rcu_torture_deferred_free,
        .sync = synchronize_rcu,
        .cb_barrier = rcu_barrier,
        .stats = NULL,
        .name = "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
        int i;
        struct rcu_torture *rp;
        struct rcu_torture *rp1;

        cur_ops->sync();
        list_add(&p->rtort_free, &rcu_torture_removed);
        list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
                i = rp->rtort_pipe_count;
                if (i > RCU_TORTURE_PIPE_LEN)
                        i = RCU_TORTURE_PIPE_LEN;
                atomic_inc(&rcu_torture_wcount[i]);
                if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                        rp->rtort_mbtest = 0;
                        list_del(&rp->rtort_free);
                        rcu_torture_free(rp);
                }
        }
}

static void rcu_sync_torture_init(void)
{
        INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = rcu_torture_read_lock,
        .readdelay = rcu_read_delay,
        .readunlock = rcu_torture_read_unlock,
        .completed = rcu_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = synchronize_rcu,
        .cb_barrier = NULL,
        .stats = NULL,
        .name = "rcu_sync"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
        rcu_read_lock_bh();
        return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
        rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
        return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

struct rcu_bh_torture_synchronize {
        struct rcu_head head;
        struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
        struct rcu_bh_torture_synchronize *rcu;

        rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
        complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
        struct rcu_bh_torture_synchronize rcu;

        init_completion(&rcu.completion);
        call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
        wait_for_completion(&rcu.completion);
}

static struct rcu_torture_ops rcu_bh_ops = {
        .init = NULL,
        .cleanup = NULL,
        .readlock = rcu_bh_torture_read_lock,
        .readdelay = rcu_read_delay,        /* just reuse rcu's version. */
        .readunlock = rcu_bh_torture_read_unlock,
        .completed = rcu_bh_torture_completed,
        .deferredfree = rcu_bh_torture_deferred_free,
        .sync = rcu_bh_torture_synchronize,
        .cb_barrier = rcu_barrier_bh,
        .stats = NULL,
        .name = "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = rcu_bh_torture_read_lock,
        .readdelay = rcu_read_delay,        /* just reuse rcu's version. */
        .readunlock = rcu_bh_torture_read_unlock,
        .completed = rcu_bh_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = rcu_bh_torture_synchronize,
        .cb_barrier = NULL,
        .stats = NULL,
        .name = "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
        init_srcu_struct(&srcu_ctl);
        rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
        synchronize_srcu(&srcu_ctl);
        cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
        return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
        long delay;
        const long uspertick = 1000000 / HZ;
        const long longdelay = 10;

        /* We want there to be long-running readers, but not all the time. */

        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
        if (!delay)
                schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
        srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
        return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
        synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
        int cnt = 0;
        int cpu;
        int idx = srcu_ctl.completed & 0x1;

        cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
                       torture_type, TORTURE_FLAG, idx);
        for_each_possible_cpu(cpu) {
                cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
        }
        cnt += sprintf(&page[cnt], "\n");
        return cnt;
}

static struct rcu_torture_ops srcu_ops = {
        .init = srcu_torture_init,
        .cleanup = srcu_torture_cleanup,
        .readlock = srcu_torture_read_lock,
        .readdelay = srcu_read_delay,
        .readunlock = srcu_torture_read_unlock,
        .completed = srcu_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = srcu_torture_synchronize,
        .cb_barrier = NULL,
        .stats = srcu_torture_stats,
        .name = "srcu"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
        preempt_disable();
        return 0;
}

static void sched_torture_read_unlock(int idx)
{
        preempt_enable();
}

static int sched_torture_completed(void)
{
        return 0;
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static void sched_torture_synchronize(void)
{
        synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = sched_torture_read_lock,
        .readdelay = rcu_read_delay,        /* just reuse rcu's version. */
        .readunlock = sched_torture_read_unlock,
        .completed = sched_torture_completed,
        .deferredfree = rcu_sched_torture_deferred_free,
        .sync = sched_torture_synchronize,
        .cb_barrier = rcu_barrier_sched,
        .stats = NULL,
        .name = "sched"
};

static struct rcu_torture_ops sched_ops_sync = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = sched_torture_read_lock,
        .readdelay = rcu_read_delay,        /* just reuse rcu's version. */
        .readunlock = sched_torture_read_unlock,
        .completed = sched_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = sched_torture_synchronize,
        .cb_barrier = NULL,
        .stats = NULL,
        .name = "sched_sync"
};

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
        int i;
        long oldbatch = rcu_batches_completed();
        struct rcu_torture *rp;
        struct rcu_torture *old_rp;
        static DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1);
                if ((rp = rcu_torture_alloc()) == NULL)
                        continue;
                rp->rtort_pipe_count = 0;
                udelay(rcu_random(&rand) & 0x3ff);
                old_rp = rcu_torture_current;
                rp->rtort_mbtest = 1;
                rcu_assign_pointer(rcu_torture_current, rp);
                if (old_rp != NULL) {
                        i = old_rp->rtort_pipe_count;
                        if (i > RCU_TORTURE_PIPE_LEN)
                                i = RCU_TORTURE_PIPE_LEN;
                        atomic_inc(&rcu_torture_wcount[i]);
                        old_rp->rtort_pipe_count++;
                        cur_ops->deferredfree(old_rp);
                }
                rcu_torture_current_version++;
                oldbatch = cur_ops->completed();
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
        DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
                udelay(rcu_random(&rand) & 0x3ff);
                cur_ops->sync();
        } while (!kthread_should_stop() && !fullstop);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken, because a count of 2 or more means that a
 * grace period ended while this reader still held a reference to the element.
 */
static int
rcu_torture_reader(void *arg)
{
        int completed;
        int idx;
        DEFINE_RCU_RANDOM(rand);
        struct rcu_torture *p;
        int pipe_count;

        VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
        set_user_nice(current, 19);

        do {
                idx = cur_ops->readlock();
                completed = cur_ops->completed();
                p = rcu_dereference(rcu_torture_current);
                if (p == NULL) {
                        /* Wait for rcu_torture_writer to get underway */
                        cur_ops->readunlock(idx);
                        schedule_timeout_interruptible(HZ);
                        continue;
                }
                if (p->rtort_mbtest == 0)
                        atomic_inc(&n_rcu_torture_mberror);
                cur_ops->readdelay(&rand);
                preempt_disable();
                pipe_count = p->rtort_pipe_count;
                if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
                ++__get_cpu_var(rcu_torture_count)[pipe_count];
                completed = cur_ops->completed() - completed;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
                }
                ++__get_cpu_var(rcu_torture_batch)[completed];
                preempt_enable();
                cur_ops->readunlock(idx);
                schedule();
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
        int cnt = 0;
        int cpu;
        int i;
        long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
        long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
                        batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
                }
        }
        for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
                if (pipesummary[i] != 0)
                        break;
        }
        cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt],
                       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
                       "rtmbe: %d",
                       rcu_torture_current,
                       rcu_torture_current_version,
                       list_empty(&rcu_torture_freelist),
                       atomic_read(&n_rcu_torture_alloc),
                       atomic_read(&n_rcu_torture_alloc_fail),
                       atomic_read(&n_rcu_torture_free),
                       atomic_read(&n_rcu_torture_mberror));
        if (atomic_read(&n_rcu_torture_mberror) != 0)
                cnt += sprintf(&page[cnt], " !!!");
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        if (i > 1) {
                cnt += sprintf(&page[cnt], "!!! ");
                atomic_inc(&n_rcu_torture_error);
        }
        cnt += sprintf(&page[cnt], "Reader Pipe: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt], "Reader Batch: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                cnt += sprintf(&page[cnt], " %d",
                               atomic_read(&rcu_torture_wcount[i]));
        }
        cnt += sprintf(&page[cnt], "\n");
        if (cur_ops->stats)
                cnt += cur_ops->stats(&page[cnt]);
        return cnt;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
        int cnt;

        cnt = rcu_torture_printk(printk_buf);
        printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                rcu_torture_stats_print();
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
        return 0;
}

static int rcu_idle_cpu;        /* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
        cpumask_t tmp_mask;
        int i;

        cpus_setall(tmp_mask);
        get_online_cpus();

        /* No point in shuffling if there is only one online CPU (ex: UP) */
        if (num_online_cpus() == 1) {
                put_online_cpus();
                return;
        }

        if (rcu_idle_cpu != -1)
                cpu_clear(rcu_idle_cpu, tmp_mask);

        set_cpus_allowed_ptr(current, &tmp_mask);

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        if (reader_tasks[i])
                                set_cpus_allowed_ptr(reader_tasks[i],
                                                     &tmp_mask);
        }

        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++)
                        if (fakewriter_tasks[i])
                                set_cpus_allowed_ptr(fakewriter_tasks[i],
                                                     &tmp_mask);
        }

        if (writer_task)
                set_cpus_allowed_ptr(writer_task, &tmp_mask);

        if (stats_task)
                set_cpus_allowed_ptr(stats_task, &tmp_mask);

        if (rcu_idle_cpu == -1)
                rcu_idle_cpu = num_online_cpus() - 1;
        else
                rcu_idle_cpu--;

        put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPUs in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
        do {
                schedule_timeout_interruptible(shuffle_interval * HZ);
                rcu_torture_shuffle_tasks();
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
        return 0;
}

static inline void
rcu_torture_print_module_parms(char *tag)
{
        printk(KERN_ALERT "%s" TORTURE_FLAG
               "--- %s: nreaders=%d nfakewriters=%d "
               "stat_interval=%d verbose=%d test_no_idle_hz=%d "
               "shuffle_interval=%d\n",
               torture_type, tag, nrealreaders, nfakewriters,
               stat_interval, verbose, test_no_idle_hz, shuffle_interval);
}

static void
rcu_torture_cleanup(void)
{
        int i;

        fullstop = 1;
        if (shuffler_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
                kthread_stop(shuffler_task);
        }
        shuffler_task = NULL;

        if (writer_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
                kthread_stop(writer_task);
        }
        writer_task = NULL;

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++) {
                        if (reader_tasks[i]) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_reader task");
                                kthread_stop(reader_tasks[i]);
                        }
                        reader_tasks[i] = NULL;
                }
                kfree(reader_tasks);
                reader_tasks = NULL;
        }
        rcu_torture_current = NULL;

        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++) {
                        if (fakewriter_tasks[i]) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_fakewriter task");
                                kthread_stop(fakewriter_tasks[i]);
                        }
                        fakewriter_tasks[i] = NULL;
                }
                kfree(fakewriter_tasks);
                fakewriter_tasks = NULL;
        }

        if (stats_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
                kthread_stop(stats_task);
        }
        stats_task = NULL;

        /* Wait for all RCU callbacks to fire. */

        if (cur_ops->cb_barrier != NULL)
                cur_ops->cb_barrier();

        rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

        if (cur_ops->cleanup)
                cur_ops->cleanup();
        if (atomic_read(&n_rcu_torture_error))
                rcu_torture_print_module_parms("End of test: FAILURE");
        else
                rcu_torture_print_module_parms("End of test: SUCCESS");
}

static int __init
rcu_torture_init(void)
{
        int i;
        int cpu;
        int firsterr = 0;
        static struct rcu_torture_ops *torture_ops[] =
                { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
                  &srcu_ops, &sched_ops, &sched_ops_sync, };

        /* Process args and tell the world that the torturer is on the job. */

        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
                cur_ops = torture_ops[i];
                if (strcmp(torture_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(torture_ops)) {
                printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
                       torture_type);
                return -EINVAL;
        }
        if (cur_ops->init)
                cur_ops->init(); /* no "goto unwind" prior to this point!!! */

        if (nreaders >= 0)
                nrealreaders = nreaders;
        else
                nrealreaders = 2 * num_online_cpus();
        rcu_torture_print_module_parms("Start of test");
        fullstop = 0;

        /* Set up the freelist. */

        INIT_LIST_HEAD(&rcu_torture_freelist);
        for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
                rcu_tortures[i].rtort_mbtest = 0;
                list_add_tail(&rcu_tortures[i].rtort_free,
                              &rcu_torture_freelist);
        }

        /* Initialize the statistics so that each run gets its own numbers. */

        rcu_torture_current = NULL;
        rcu_torture_current_version = 0;
        atomic_set(&n_rcu_torture_alloc, 0);
        atomic_set(&n_rcu_torture_alloc_fail, 0);
        atomic_set(&n_rcu_torture_free, 0);
        atomic_set(&n_rcu_torture_mberror, 0);
        atomic_set(&n_rcu_torture_error, 0);
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                atomic_set(&rcu_torture_wcount[i], 0);
        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        per_cpu(rcu_torture_count, cpu)[i] = 0;
                        per_cpu(rcu_torture_batch, cpu)[i] = 0;
                }
        }

        /* Start up the kthreads. */

        VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
        writer_task = kthread_run(rcu_torture_writer, NULL,
                                  "rcu_torture_writer");
        if (IS_ERR(writer_task)) {
                firsterr = PTR_ERR(writer_task);
                VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
                writer_task = NULL;
                goto unwind;
        }
        fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
                                   GFP_KERNEL);
        if (fakewriter_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nfakewriters; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
                fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
                                                  "rcu_torture_fakewriter");
                if (IS_ERR(fakewriter_tasks[i])) {
                        firsterr = PTR_ERR(fakewriter_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
                        fakewriter_tasks[i] = NULL;
                        goto unwind;
                }
        }
        reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
                reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
                                              "rcu_torture_reader");
                if (IS_ERR(reader_tasks[i])) {
                        firsterr = PTR_ERR(reader_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
                        reader_tasks[i] = NULL;
                        goto unwind;
                }
        }
        if (stat_interval > 0) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
                stats_task = kthread_run(rcu_torture_stats, NULL,
                                         "rcu_torture_stats");
                if (IS_ERR(stats_task)) {
                        firsterr = PTR_ERR(stats_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
                        stats_task = NULL;
                        goto unwind;
                }
        }
        if (test_no_idle_hz) {
                rcu_idle_cpu = num_online_cpus() - 1;
                /* Create the shuffler thread */
                shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
                                            "rcu_torture_shuffle");
                if (IS_ERR(shuffler_task)) {
                        firsterr = PTR_ERR(shuffler_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
                        shuffler_task = NULL;
                        goto unwind;
                }
        }
        return 0;

unwind:
        rcu_torture_cleanup();
        return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);