diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 1c329df..33acc42 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
 #include <linux/sched.h>
 #include <asm/atomic.h>
 #include <linux/bitops.h>
-#include <linux/module.h>
 #include <linux/completion.h>
 #include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
+#include <linux/freezer.h>
 #include <linux/cpu.h>
-#include <linux/random.h>
 #include <linux/delay.h>
 #include <linux/byteorder/swabb.h>
 #include <linux/stat.h>
 #include <linux/srcu.h>
+#include <linux/slab.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
@@ -60,19 +60,19 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
 static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
 static char *torture_type = "rcu"; /* What RCU implementation to torture. */
 
-module_param(nreaders, int, 0);
+module_param(nreaders, int, 0444);
 MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
-module_param(nfakewriters, int, 0);
+module_param(nfakewriters, int, 0444);
 MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-module_param(stat_interval, int, 0);
+module_param(stat_interval, int, 0444);
 MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
-module_param(verbose, bool, 0);
+module_param(verbose, bool, 0444);
 MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
-module_param(test_no_idle_hz, bool, 0);
+module_param(test_no_idle_hz, bool, 0444);
 MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
-module_param(shuffle_interval, int, 0);
+module_param(shuffle_interval, int, 0444);
 MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-module_param(torture_type, charp, 0);
+module_param(torture_type, charp, 0444);
 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
 
 #define TORTURE_FLAG "-torture:"
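
Side note on the permission change above: passing 0444 instead of 0 to module_param() makes each parameter visible read-only under /sys/module/rcutorture/parameters/ once the module is loaded. A minimal sketch of the same pattern in a hypothetical module (the module and parameter names are made up, not taken from this file):

/* Hypothetical module illustrating the read-only sysfs exposure above. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int nloops = 10;
module_param(nloops, int, 0444);	/* 0444: readable via sysfs, not writable */
MODULE_PARM_DESC(nloops, "Number of test loops");
MODULE_LICENSE("GPL");

After loading, the current value can be read from /sys/module/&lt;modname&gt;/parameters/nloops.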
@@ -165,16 +165,14 @@ struct rcu_random_state {
 
 /*
  * Crude but fast random-number generator.  Uses a linear congruential
- * generator, with occasional help from get_random_bytes().
+ * generator, with occasional help from cpu_clock().
  */
 static unsigned long
 rcu_random(struct rcu_random_state *rrsp)
 {
-       long refresh;
-
        if (--rrsp->rrs_count < 0) {
-               get_random_bytes(&refresh, sizeof(refresh));
-               rrsp->rrs_state += refresh;
+               rrsp->rrs_state +=
+                       (unsigned long)cpu_clock(raw_smp_processor_id());
                rrsp->rrs_count = RCU_RANDOM_REFRESH;
        }
        rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
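
The rewritten rcu_random() above keeps the linear congruential step (state = state * RCU_RANDOM_MULT + RCU_RANDOM_ADD) and only swaps the occasional reseed source from get_random_bytes() to cpu_clock() on the current CPU. A user-space sketch of the same technique, with placeholder constants standing in for the RCU_RANDOM_* macros:

/* Stand-alone sketch of the LCG-with-periodic-reseed idea; the multiplier,
 * increment, and refresh interval are placeholders, not rcutorture's. */
#include <time.h>

struct toy_random_state {
	unsigned long state;
	long count;
};

static unsigned long toy_random(struct toy_random_state *s)
{
	if (--s->count < 0) {
		s->state += (unsigned long)clock();	/* cheap, occasional reseed */
		s->count = 10000;
	}
	/* linear congruential step, as in rcu_random() */
	s->state = s->state * 1664525UL + 1013904223UL;
	return s->state;
}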
@@ -370,6 +368,19 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .name = "rcu_bh"
 };
 
+static struct rcu_torture_ops rcu_bh_sync_ops = {
+       .init = rcu_sync_torture_init,
+       .cleanup = NULL,
+       .readlock = rcu_bh_torture_read_lock,
+       .readdelay = rcu_read_delay,  /* just reuse rcu's version. */
+       .readunlock = rcu_bh_torture_read_unlock,
+       .completed = rcu_bh_torture_completed,
+       .deferredfree = rcu_sync_torture_deferred_free,
+       .sync = rcu_bh_torture_synchronize,
+       .stats = NULL,
+       .name = "rcu_bh_sync"
+};
+
 /*
  * Definitions for srcu torture testing.
  */
@@ -388,7 +399,7 @@ static void srcu_torture_cleanup(void)
        cleanup_srcu_struct(&srcu_ctl);
 }
 
-static int srcu_torture_read_lock(void)
+static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
 {
        return srcu_read_lock(&srcu_ctl);
 }
@@ -406,7 +417,7 @@ static void srcu_read_delay(struct rcu_random_state *rrsp)
                schedule_timeout_interruptible(longdelay);
 }
 
-static void srcu_torture_read_unlock(int idx)
+static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
 {
        srcu_read_unlock(&srcu_ctl, idx);
 }
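
The __acquires()/__releases() markers added above are annotations for the sparse static checker; on a normal compile they expand to nothing, so the generated code is unchanged. A small sketch of the usual pattern on a lock wrapper (the lock and functions here are hypothetical; only the annotation placement matters):

/* Hypothetical wrappers showing the sparse context annotations;
 * needs <linux/spinlock.h>.  Under sparse the annotations let the
 * checker verify balanced acquire/release; otherwise they are no-ops. */
static DEFINE_SPINLOCK(example_lock);

static void example_enter(void) __acquires(&example_lock)
{
	spin_lock(&example_lock);
}

static void example_exit(void) __releases(&example_lock)
{
	spin_unlock(&example_lock);
}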
@@ -451,8 +462,43 @@ static struct rcu_torture_ops srcu_ops = {
        .name = "srcu"
 };
 
-static struct rcu_torture_ops *torture_ops[] =
-       { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &srcu_ops, NULL };
+/*
+ * Definitions for sched torture testing.
+ */
+
+static int sched_torture_read_lock(void)
+{
+       preempt_disable();
+       return 0;
+}
+
+static void sched_torture_read_unlock(int idx)
+{
+       preempt_enable();
+}
+
+static int sched_torture_completed(void)
+{
+       return 0;
+}
+
+static void sched_torture_synchronize(void)
+{
+       synchronize_sched();
+}
+
+static struct rcu_torture_ops sched_ops = {
+       .init = rcu_sync_torture_init,
+       .cleanup = NULL,
+       .readlock = sched_torture_read_lock,
+       .readdelay = rcu_read_delay,  /* just reuse rcu's version. */
+       .readunlock = sched_torture_read_unlock,
+       .completed = sched_torture_completed,
+       .deferredfree = rcu_sync_torture_deferred_free,
+       .sync = sched_torture_synchronize,
+       .stats = NULL,
+       .name = "sched"
+};
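
sched_ops above exercises the "classic" RCU-sched flavor: a preempt-disabled region serves as the read-side critical section and synchronize_sched() is the matching grace-period wait. A minimal sketch of a reader built on that pairing (the data structure and pointer are hypothetical; it assumes the pointer has already been published and is non-NULL):

/* Hypothetical RCU-sched style reader; needs <linux/rcupdate.h> and
 * <linux/preempt.h>.  preempt_disable()/preempt_enable() play the role
 * of readlock/readunlock, exactly as in sched_torture_read_lock/unlock. */
struct example_data {
	int value;
};
static struct example_data *example_ptr;

static int example_read_value(void)
{
	int val;

	preempt_disable();			/* begin RCU-sched read side */
	val = rcu_dereference(example_ptr)->value;
	preempt_enable();			/* end RCU-sched read side */
	return val;
}

An updater that replaces example_ptr would call synchronize_sched() before freeing the old structure, which is the step sched_ops wires to .sync.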
 
 /*
  * RCU torture writer kthread.  Repeatedly substitutes a new structure
@@ -481,7 +527,7 @@ rcu_torture_writer(void *arg)
                rp->rtort_mbtest = 1;
                rcu_assign_pointer(rcu_torture_current, rp);
                smp_wmb();
-               if (old_rp != NULL) {
+               if (old_rp) {
                        i = old_rp->rtort_pipe_count;
                        if (i > RCU_TORTURE_PIPE_LEN)
                                i = RCU_TORTURE_PIPE_LEN;
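
The writer kthread shown above follows the standard RCU update pattern: publish a new element with rcu_assign_pointer(), then hand the old element to the flavor's deferred-free/sync hooks so it is only reclaimed after a grace period. A generic, self-contained sketch of that publish-then-defer-free step (names are hypothetical, not rcutorture's):

/* Hypothetical updater; needs <linux/rcupdate.h> and <linux/slab.h>.
 * rcutorture instead routes the old element through cur_ops->deferredfree(),
 * so the same loop exercises call_rcu() or the synchronous variants. */
struct example_item {
	int payload;
};
static struct example_item *example_current;

static void example_replace(struct example_item *newp)
{
	struct example_item *old = example_current;

	rcu_assign_pointer(example_current, newp);	/* publish new version */
	if (old) {
		synchronize_rcu();	/* wait for pre-existing readers */
		kfree(old);		/* then reclaim the old version */
	}
}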
@@ -630,7 +676,7 @@ rcu_torture_printk(char *page)
                               atomic_read(&rcu_torture_wcount[i]));
        }
        cnt += sprintf(&page[cnt], "\n");
-       if (cur_ops->stats != NULL)
+       if (cur_ops->stats)
                cnt += cur_ops->stats(&page[cnt]);
        return cnt;
 }
@@ -678,46 +724,49 @@ static int rcu_idle_cpu;  /* Force all torture tasks off this CPU */
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-       cpumask_t tmp_mask = CPU_MASK_ALL;
+       cpumask_t tmp_mask;
        int i;
 
-       lock_cpu_hotplug();
+       cpus_setall(tmp_mask);
+       get_online_cpus();
 
        /* No point in shuffling if there is only one online CPU (ex: UP) */
        if (num_online_cpus() == 1) {
-               unlock_cpu_hotplug();
+               put_online_cpus();
                return;
        }
 
        if (rcu_idle_cpu != -1)
                cpu_clear(rcu_idle_cpu, tmp_mask);
 
-       set_cpus_allowed(current, tmp_mask);
+       set_cpus_allowed_ptr(current, &tmp_mask);
 
-       if (reader_tasks != NULL) {
+       if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        if (reader_tasks[i])
-                               set_cpus_allowed(reader_tasks[i], tmp_mask);
+                               set_cpus_allowed_ptr(reader_tasks[i],
+                                                    &tmp_mask);
        }
 
-       if (fakewriter_tasks != NULL) {
+       if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++)
                        if (fakewriter_tasks[i])
-                               set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
+                               set_cpus_allowed_ptr(fakewriter_tasks[i],
+                                                    &tmp_mask);
        }
 
        if (writer_task)
-               set_cpus_allowed(writer_task, tmp_mask);
+               set_cpus_allowed_ptr(writer_task, &tmp_mask);
 
        if (stats_task)
-               set_cpus_allowed(stats_task, tmp_mask);
+               set_cpus_allowed_ptr(stats_task, &tmp_mask);
 
        if (rcu_idle_cpu == -1)
                rcu_idle_cpu = num_online_cpus() - 1;
        else
                rcu_idle_cpu--;
 
-       unlock_cpu_hotplug();
+       put_online_cpus();
 }
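
Two API updates are visible in the shuffler above: get_online_cpus()/put_online_cpus() replace lock_cpu_hotplug()/unlock_cpu_hotplug() to keep the set of online CPUs stable while the masks are computed, and set_cpus_allowed_ptr() takes the cpumask by pointer. A minimal sketch of the same hotplug-safe affinity update on a single hypothetical task:

/* Hypothetical helper; needs <linux/cpu.h>, <linux/cpumask.h>, <linux/sched.h>.
 * Moves one task off a given CPU while holding off CPU hotplug, using the
 * same calls as rcu_torture_shuffle_tasks(). */
static void example_move_off_cpu(struct task_struct *t, int cpu)
{
	cpumask_t mask;

	get_online_cpus();			/* hold the online-CPU set stable */
	cpus_setall(mask);			/* allow every CPU ... */
	cpu_clear(cpu, mask);			/* ... except the one being idled */
	set_cpus_allowed_ptr(t, &mask);		/* mask now passed by pointer */
	put_online_cpus();
}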
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
@@ -753,21 +802,21 @@ rcu_torture_cleanup(void)
        int i;
 
        fullstop = 1;
-       if (shuffler_task != NULL) {
+       if (shuffler_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
                kthread_stop(shuffler_task);
        }
        shuffler_task = NULL;
 
-       if (writer_task != NULL) {
+       if (writer_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
                kthread_stop(writer_task);
        }
        writer_task = NULL;
 
-       if (reader_tasks != NULL) {
+       if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++) {
-                       if (reader_tasks[i] != NULL) {
+                       if (reader_tasks[i]) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_reader task");
                                kthread_stop(reader_tasks[i]);
@@ -779,9 +828,9 @@ rcu_torture_cleanup(void)
        }
        rcu_torture_current = NULL;
 
-       if (fakewriter_tasks != NULL) {
+       if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++) {
-                       if (fakewriter_tasks[i] != NULL) {
+                       if (fakewriter_tasks[i]) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_fakewriter task");
                                kthread_stop(fakewriter_tasks[i]);
@@ -792,7 +841,7 @@ rcu_torture_cleanup(void)
                fakewriter_tasks = NULL;
        }
 
-       if (stats_task != NULL) {
+       if (stats_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
                kthread_stop(stats_task);
        }
@@ -803,7 +852,7 @@ rcu_torture_cleanup(void)
 
        rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
 
-       if (cur_ops->cleanup != NULL)
+       if (cur_ops->cleanup)
                cur_ops->cleanup();
        if (atomic_read(&n_rcu_torture_error))
                rcu_torture_print_module_parms("End of test: FAILURE");
@@ -811,27 +860,28 @@ rcu_torture_cleanup(void)
                rcu_torture_print_module_parms("End of test: SUCCESS");
 }
 
-static int
+static int __init
 rcu_torture_init(void)
 {
        int i;
        int cpu;
        int firsterr = 0;
+       static struct rcu_torture_ops *torture_ops[] =
+               { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
+                 &srcu_ops, &sched_ops, };
 
        /* Process args and tell the world that the torturer is on the job. */
-
-       for (i = 0; cur_ops = torture_ops[i], cur_ops != NULL; i++) {
+       for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
                cur_ops = torture_ops[i];
-               if (strcmp(torture_type, cur_ops->name) == 0) {
+               if (strcmp(torture_type, cur_ops->name) == 0)
                        break;
-               }
        }
-       if (cur_ops == NULL) {
+       if (i == ARRAY_SIZE(torture_ops)) {
                printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
                       torture_type);
                return (-EINVAL);
        }
-       if (cur_ops->init != NULL)
+       if (cur_ops->init)
                cur_ops->init(); /* no "goto unwind" prior to this point!!! */
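
The lookup above now walks a fixed-size ops table with ARRAY_SIZE() and detects "no match" by the index reaching the table size, instead of relying on a NULL sentinel entry. A small user-space sketch of the same lookup-by-name idiom with hypothetical entries:

/* User-space sketch of the ARRAY_SIZE()-bounded lookup; the table
 * entries are hypothetical. */
#include <stddef.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct ops { const char *name; };

static struct ops rcu_like = { "rcu" }, srcu_like = { "srcu" };
static struct ops *table[] = { &rcu_like, &srcu_like };

static struct ops *find_ops(const char *name)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(table); i++)
		if (strcmp(name, table[i]->name) == 0)
			return table[i];
	return NULL;	/* i reached ARRAY_SIZE(table): unknown name */
}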
 
        if (nreaders >= 0)
@@ -844,7 +894,7 @@ rcu_torture_init(void)
        /* Set up the freelist. */
 
        INIT_LIST_HEAD(&rcu_torture_freelist);
-       for (i = 0; i < sizeof(rcu_tortures) / sizeof(rcu_tortures[0]); i++) {
+       for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
                rcu_tortures[i].rtort_mbtest = 0;
                list_add_tail(&rcu_tortures[i].rtort_free,
                              &rcu_torture_freelist);