/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
              "Josh Triplett <josh@freedesktop.org>");
static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */
module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
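
/*
 * The parameters above are read-only (0444) module parameters, so they
 * must be given at module-load time.  For example, a run of the "srcu"
 * flavor with eight reader threads and a stats line every 30 seconds
 * could be started with something like:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * and stopped with "rmmod rcutorture", which prints the final statistics
 * and an "End of test" line.
 */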
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
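
/*
 * Every message produced by these macros is prefixed with the torture type
 * followed by "-torture:" (for example "rcu-torture:" or "srcu-torture:"),
 * so the test's output is easy to pick out of the console log.
 */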
static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
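
/*
 * One writer, nfakewriters fake writers, and nrealreaders readers are
 * created by rcu_torture_init(); the stats, shuffler, and stutter tasks
 * are optional, depending on the stat_interval, test_no_idle_hz, and
 * stutter module parameters.
 */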
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
static int fullstop = 0;	/* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static struct list_head rcu_torture_removed;

static int stutter_pause_test = 0;
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state +=
			(unsigned long)cpu_clock(raw_smp_processor_id());
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
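
/*
 * The readers, the writer, and the fake writers all call this between
 * operations; the stutter kthread periodically sets stutter_pause_test,
 * which holds them all here until the test is allowed to run again.
 */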
static void
rcu_stutter_wait(void)
{
	while (stutter_pause_test)
		schedule_timeout_interruptible(1);
}
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readdelay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferredfree)(struct rcu_torture *p);
	void (*sync)(void);
	void (*cb_barrier)(void);
	int (*stats)(char *page);
	char *name;
};
static struct rcu_torture_ops *cur_ops = NULL;
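
/*
 * Each RCU flavor under test supplies one of these structures; the
 * torture_type module parameter is matched against the ->name fields in
 * rcu_torture_init() to select which flavor's callbacks are used.
 */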
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long longdelay = 200;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
	if (!delay)
		udelay(longdelay);
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}
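
/*
 * RCU callback for a torture element: each invocation records the
 * element's progress through the grace-period "pipeline" in
 * rcu_torture_wcount[], and either re-queues the element via the current
 * flavor's deferredfree method or, once it has aged RCU_TORTURE_PIPE_LEN
 * grace periods, returns it to the free pool.
 */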
static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferredfree(rp);
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops rcu_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.cb_barrier = rcu_barrier,
	.stats = NULL,
	.name = "rcu"
};
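
/*
 * For the synchronous flavors there is no call_rcu()-style deferral, so
 * deferred frees are emulated: the element is parked on
 * rcu_torture_removed, a grace period is forced via the flavor's sync
 * method, and the list is then swept for elements that have aged out of
 * the pipeline.
 */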
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = synchronize_rcu,
	.cb_barrier = NULL,
	.stats = NULL,
	.name = "rcu_sync"
};
/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}
struct rcu_bh_torture_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
	struct rcu_bh_torture_synchronize *rcu;

	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
	complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
	struct rcu_bh_torture_synchronize rcu;

	init_completion(&rcu.completion);
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
}
static struct rcu_torture_ops rcu_bh_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_bh_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.cb_barrier = rcu_barrier_bh,
	.stats = NULL,
	.name = "rcu_bh"
};
static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.cb_barrier = NULL,
	.stats = NULL,
	.name = "rcu_bh_sync"
};
/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}
static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}
static struct rcu_torture_ops srcu_ops = {
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.readdelay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.completed = srcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.cb_barrier = NULL,
	.stats = srcu_torture_stats,
	.name = "srcu"
};
/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static int sched_torture_completed(void)
{
	return 0;
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sched_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.cb_barrier = rcu_barrier_sched,
	.stats = NULL,
	.name = "sched"
};
static struct rcu_torture_ops sched_ops_sync = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.cb_barrier = NULL,
	.stats = NULL,
	.name = "sched_sync"
};
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		if ((rp = rcu_torture_alloc()) == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb();
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferredfree(old_rp);
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);

	do {
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->readdelay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
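
/*
 * The summary line built below uses abbreviated field names: "rtc" is the
 * current rcu_torture_current pointer, "ver" the writer's version count,
 * "tfle" whether the torture free list is empty, "rta"/"rtaf"/"rtf" the
 * allocation, allocation-failure, and free counts, and "rtmbe" the number
 * of rtort_mbtest (memory-ordering) errors seen by readers.
 */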
/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror));
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}
static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	cpumask_t tmp_mask;
	int i;

	cpus_setall(tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpu_clear(rcu_idle_cpu, tmp_mask);

	set_cpus_allowed_ptr(current, &tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     &tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     &tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, &tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, &tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}
/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}
static inline void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
		"--- %s: nreaders=%d nfakewriters=%d "
		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
		"shuffle_interval=%d stutter=%d\n",
		torture_type, tag, nrealreaders, nfakewriters,
		stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		stutter);
}
static void
rcu_torture_cleanup(void)
{
	int i;

	fullstop = 1;
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	/* Wait for all RCU callbacks to fire. */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}
static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
		  &srcu_ops, &sched_ops, &sched_ops_sync, };

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
		       torture_type);
		return (-EINVAL);
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = 0;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;
		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		/* Create the stutter thread */
		stutter_task = kthread_run(rcu_torture_stutter, NULL,
					   "rcu_torture_stutter");
		if (IS_ERR(stutter_task)) {
			firsterr = PTR_ERR(stutter_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
			stutter_task = NULL;
			goto unwind;
		}
	}
	return 0;

unwind:
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);