[PATCH] Fix bug in RCU torture test
kernel/rcutorture.c
/*
 * Read-Copy Update /proc-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcuref.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <asm/atomic.h>

MODULE_LICENSE("GPL");

static int nreaders = -1;       /* # reader threads, defaults to 2*ncpus */
static int stat_interval = 0;   /* Interval between stats, in seconds. */
                                /*  Defaults to "only at end of test". */
static int verbose = 0;         /* Print more debug info. */

MODULE_PARM(nreaders, "i");
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
MODULE_PARM(stat_interval, "i");
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
MODULE_PARM(verbose, "i");
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
#define TORTURE_FLAG "rcutorture: "
#define PRINTK_STRING(s) \
        do { printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
        do { if (verbose) printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
        do { if (verbose) printk(KERN_ALERT TORTURE_FLAG "!!! " s "\n"); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;

#define RCU_TORTURE_PIPE_LEN 10

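/*
 * Each rcu_torture element cycles through the grace-period "pipeline":
 * rtort_rcu chains it through call_rcu(), rtort_pipe_count records its
 * position in the pipeline, rtort_free links it on the freelist, and
 * rtort_mbtest is nonzero only while the element is live, letting
 * readers detect an element that was freed too soon.
 */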
struct rcu_torture {
        struct rcu_head rtort_rcu;
        int rtort_pipe_count;
        struct list_head rtort_free;
        int rtort_mbtest;
};

static int fullstop = 0;        /* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
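/*
 * Per-CPU reader histograms: rcu_torture_count[i] counts reader accesses
 * to elements at pipeline position i, and rcu_torture_batch[i] counts
 * read-side critical sections during which i RCU batches completed.
 */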
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
        { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
        { 0 };
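/*
 * Event counters reported by rcu_torture_printk(): rcu_torture_wcount[]
 * counts how often an element is advanced out of each pipeline position
 * ("Free-Block Circulation"), and the remaining counters track
 * allocations, allocation failures, frees, mbtest failures seen by
 * readers, and overall test errors.
 */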
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;

/*
 * Allocate an element from the rcu_tortures pool.  The freelist lock is
 * also acquired from softirq context by rcu_torture_free(), so use the
 * _bh spinlock primitives to avoid deadlock.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
        struct list_head *p;

        spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
                atomic_inc(&n_rcu_torture_alloc_fail);
                spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
        atomic_inc(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
        spin_unlock_bh(&rcu_torture_lock);
        return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.  This runs from the
 * rcu_torture_cb() RCU callback, i.e. in softirq context, which is
 * why the freelist lock uses the _bh spinlock primitives.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
        atomic_inc(&n_rcu_torture_free);
        spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
        spin_unlock_bh(&rcu_torture_lock);
}

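/*
 * RCU callback invoked once per grace period for each outstanding
 * element.  Advances the element one stage through the pipeline, then
 * either frees it (once it has aged through RCU_TORTURE_PIPE_LEN grace
 * periods) or re-posts it with call_rcu() to wait for another one.
 */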
static void
rcu_torture_cb(struct rcu_head *p)
{
        int i;
        struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

        if (fullstop) {
                /* Test is ending, just drop callbacks on the floor. */
                /* The next initialization will pick up the pieces. */
                return;
        }
        i = rp->rtort_pipe_count;
        if (i > RCU_TORTURE_PIPE_LEN)
                i = RCU_TORTURE_PIPE_LEN;
        atomic_inc(&rcu_torture_wcount[i]);
        if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                rp->rtort_mbtest = 0;
                rcu_torture_free(rp);
        } else
                call_rcu(p, rcu_torture_cb);
}

struct rcu_random_state {
        unsigned long rrs_state;
        unsigned long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().  The
 * low-order bits of a power-of-two-modulus LCG have very short periods,
 * so swahw32() swaps the 16-bit halves of the state before returning
 * it; callers that mask off only the low-order bits of the result thus
 * see the better-mixed upper half of the state.
 */
static long
rcu_random(struct rcu_random_state *rrsp)
{
        long refresh;

        if (--rrsp->rrs_count < 0) {
                get_random_bytes(&refresh, sizeof(refresh));
                rrsp->rrs_state += refresh;
                rrsp->rrs_count = RCU_RANDOM_REFRESH;
        }
        rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
        return swahw32(rrsp->rrs_state);
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
        int i;
        long oldbatch = rcu_batches_completed();
        struct rcu_torture *rp;
        struct rcu_torture *old_rp;
        static DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1);
                if (rcu_batches_completed() == oldbatch)
                        continue;
                if ((rp = rcu_torture_alloc()) == NULL)
                        continue;
                rp->rtort_pipe_count = 0;
                udelay(rcu_random(&rand) & 0x3ff);
                old_rp = rcu_torture_current;
                rp->rtort_mbtest = 1;
                rcu_assign_pointer(rcu_torture_current, rp);
                smp_wmb();
                if (old_rp != NULL) {
                        i = old_rp->rtort_pipe_count;
                        if (i > RCU_TORTURE_PIPE_LEN)
                                i = RCU_TORTURE_PIPE_LEN;
                        atomic_inc(&rcu_torture_wcount[i]);
                        old_rp->rtort_pipe_count++;
                        call_rcu(&old_rp->rtort_rcu, rcu_torture_cb);
                }
                rcu_torture_current_version++;
                oldbatch = rcu_batches_completed();
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; if it is, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
        int completed;
        DEFINE_RCU_RANDOM(rand);
        struct rcu_torture *p;
        int pipe_count;

        VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
        set_user_nice(current, 19);

        do {
                rcu_read_lock();
                completed = rcu_batches_completed();
                p = rcu_dereference(rcu_torture_current);
                if (p == NULL) {
                        /* Wait for rcu_torture_writer to get underway */
                        rcu_read_unlock();
                        schedule_timeout_interruptible(HZ);
                        continue;
                }
                if (p->rtort_mbtest == 0)
                        atomic_inc(&n_rcu_torture_mberror);
                udelay(rcu_random(&rand) & 0x7f);
                preempt_disable();
                pipe_count = p->rtort_pipe_count;
                if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
                ++__get_cpu_var(rcu_torture_count)[pipe_count];
                completed = rcu_batches_completed() - completed;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
                }
                ++__get_cpu_var(rcu_torture_batch)[completed];
                preempt_enable();
                rcu_read_unlock();
                schedule();
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 * Abbreviations in the output: rtc = current element, ver = writer
 * version number, tfle = freelist empty, rta/rtaf/rtf = allocations,
 * allocation failures, and frees, and rtmbe = mbtest errors seen by
 * readers.
 */
static int
rcu_torture_printk(char *page)
{
        int cnt = 0;
        int cpu;
        int i;
        long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
        long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

        for_each_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
                        batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
                }
        }
        for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
                if (pipesummary[i] != 0)
                        break;
        }
        cnt += sprintf(&page[cnt], "rcutorture: ");
        cnt += sprintf(&page[cnt],
                       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
                       "rtmbe: %d",
                       rcu_torture_current,
                       rcu_torture_current_version,
                       list_empty(&rcu_torture_freelist),
                       atomic_read(&n_rcu_torture_alloc),
                       atomic_read(&n_rcu_torture_alloc_fail),
                       atomic_read(&n_rcu_torture_free),
                       atomic_read(&n_rcu_torture_mberror));
        if (atomic_read(&n_rcu_torture_mberror) != 0)
                cnt += sprintf(&page[cnt], " !!!");
        cnt += sprintf(&page[cnt], "\nrcutorture: ");
        if (i > 1) {
                cnt += sprintf(&page[cnt], "!!! ");
                atomic_inc(&n_rcu_torture_error);
        }
        cnt += sprintf(&page[cnt], "Reader Pipe: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
        cnt += sprintf(&page[cnt], "\nrcutorture: ");
        cnt += sprintf(&page[cnt], "Reader Batch: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
        cnt += sprintf(&page[cnt], "\nrcutorture: ");
        cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                cnt += sprintf(&page[cnt], " %d",
                               atomic_read(&rcu_torture_wcount[i]));
        }
        cnt += sprintf(&page[cnt], "\n");
        return cnt;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
        int cnt;

        cnt = rcu_torture_printk(printk_buf);
        printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                rcu_torture_stats_print();
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
        return 0;
}

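/*
 * Shut the test down: stop the writer, reader, and stats kthreads, wait
 * for any outstanding RCU callbacks with rcu_barrier(), then print the
 * final statistics and the SUCCESS/FAILURE verdict.
 */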
static void
rcu_torture_cleanup(void)
{
        int i;

        fullstop = 1;
        if (writer_task != NULL) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
                kthread_stop(writer_task);
        }
        writer_task = NULL;

        if (reader_tasks != NULL) {
                for (i = 0; i < nrealreaders; i++) {
                        if (reader_tasks[i] != NULL) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_reader task");
                                kthread_stop(reader_tasks[i]);
                        }
                        reader_tasks[i] = NULL;
                }
                kfree(reader_tasks);
                reader_tasks = NULL;
        }
        rcu_torture_current = NULL;

        if (stats_task != NULL) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
                kthread_stop(stats_task);
        }
        stats_task = NULL;

        /* Wait for all RCU callbacks to fire.  */
        rcu_barrier();

        rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
        printk(KERN_ALERT TORTURE_FLAG
               "--- End of test: %s\n",
               atomic_read(&n_rcu_torture_error) == 0 ? "SUCCESS" : "FAILURE");
}

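/*
 * Module initialization: process the module parameters, set up the
 * freelist, zero the statistics, and start the writer, reader, and
 * (if stat_interval > 0) stats kthreads.  Any failure unwinds through
 * rcu_torture_cleanup().
 */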
static int
rcu_torture_init(void)
{
        int i;
        int cpu;
        int firsterr = 0;

        /* Process args and tell the world that the torturer is on the job. */

        if (nreaders >= 0)
                nrealreaders = nreaders;
        else
                nrealreaders = 2 * num_online_cpus();
        printk(KERN_ALERT TORTURE_FLAG
               "--- Start of test: nreaders=%d stat_interval=%d verbose=%d\n",
               nrealreaders, stat_interval, verbose);
        fullstop = 0;

        /* Set up the freelist. */

        INIT_LIST_HEAD(&rcu_torture_freelist);
        for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
                rcu_tortures[i].rtort_mbtest = 0;
                list_add_tail(&rcu_tortures[i].rtort_free,
                              &rcu_torture_freelist);
        }

        /* Initialize the statistics so that each run gets its own numbers. */

        rcu_torture_current = NULL;
        rcu_torture_current_version = 0;
        atomic_set(&n_rcu_torture_alloc, 0);
        atomic_set(&n_rcu_torture_alloc_fail, 0);
        atomic_set(&n_rcu_torture_free, 0);
        atomic_set(&n_rcu_torture_mberror, 0);
        atomic_set(&n_rcu_torture_error, 0);
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                atomic_set(&rcu_torture_wcount[i], 0);
        for_each_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        per_cpu(rcu_torture_count, cpu)[i] = 0;
                        per_cpu(rcu_torture_batch, cpu)[i] = 0;
                }
        }

        /* Start up the kthreads. */

        VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
        writer_task = kthread_run(rcu_torture_writer, NULL,
                                  "rcu_torture_writer");
        if (IS_ERR(writer_task)) {
                firsterr = PTR_ERR(writer_task);
                VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
                writer_task = NULL;
                goto unwind;
        }
        /*
         * Use kcalloc() so that unused slots are NULL, allowing
         * rcu_torture_cleanup() to safely skip readers that were
         * never started.
         */
        reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
                reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
                                              "rcu_torture_reader");
                if (IS_ERR(reader_tasks[i])) {
                        firsterr = PTR_ERR(reader_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
                        reader_tasks[i] = NULL;
                        goto unwind;
                }
        }
        if (stat_interval > 0) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
                stats_task = kthread_run(rcu_torture_stats, NULL,
                                        "rcu_torture_stats");
                if (IS_ERR(stats_task)) {
                        firsterr = PTR_ERR(stats_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
                        stats_task = NULL;
                        goto unwind;
                }
        }
        return 0;

unwind:
        rcu_torture_cleanup();
        return firsterr;
}
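
/*
 * Typical usage (see Documentation/RCU/torture.txt), with illustrative
 * parameter values:
 *
 *         modprobe rcutorture nreaders=8 stat_interval=30 verbose=1
 *         ... let the test run for a while ...
 *         rmmod rcutorture
 *
 * then check the system log for the "--- End of test:" line printed by
 * rcu_torture_cleanup().
 */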

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);