rcu: add rcu_barrier_sched() and rcu_barrier_bh()
author Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 12 May 2008 19:21:05 +0000 (21:21 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 19 May 2008 08:01:36 +0000 (10:01 +0200)
Add rcu_barrier_sched() and rcu_barrier_bh().  With these in place,
rcutorture no longer gives the occasional oops when repeatedly starting
and stopping rcu_bh torture testing.  This change also adds the API
needed to flush out pre-existing call_rcu_sched() callbacks.
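
For illustration only (this sketch is not part of the patch): a hypothetical
module that queues callbacks via call_rcu_sched() could call the new
rcu_barrier_sched() from its exit path so that every pending callback has run
before the module is unloaded.  The names foo, foo_reclaim, foo_init, and
foo_exit below are invented for this sketch.

/*
 * Hypothetical sketch (not from this patch): flush pending
 * call_rcu_sched() callbacks on module unload so that no callback
 * runs after the module text has been freed.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct rcu_head rcu;
	int data;
};

static struct foo *global_foo;

static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static int __init foo_init(void)
{
	global_foo = kzalloc(sizeof(*global_foo), GFP_KERNEL);
	return global_foo ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	if (global_foo)
		call_rcu_sched(&global_foo->rcu, foo_reclaim);

	/* New API: wait for all pre-existing call_rcu_sched() callbacks. */
	rcu_barrier_sched();
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");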

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/rcupdate.h
kernel/rcupdate.c

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 411969c..e8b4039 100644
@@ -246,6 +246,8 @@ extern void call_rcu_bh(struct rcu_head *head,
 /* Exported common interfaces */
 extern void synchronize_rcu(void);
 extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a4e329d..4a74b8d 100644
 #include <linux/mutex.h>
 #include <linux/module.h>
 
+enum rcu_barrier {
+       RCU_BARRIER_STD,
+       RCU_BARRIER_BH,
+       RCU_BARRIER_SCHED,
+};
+
 static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
@@ -83,19 +89,30 @@ static void rcu_barrier_callback(struct rcu_head *notused)
 /*
  * Called with preemption disabled, and from cross-cpu IRQ context.
  */
-static void rcu_barrier_func(void *notused)
+static void rcu_barrier_func(void *type)
 {
        int cpu = smp_processor_id();
        struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
 
        atomic_inc(&rcu_barrier_cpu_count);
-       call_rcu(head, rcu_barrier_callback);
+       switch ((enum rcu_barrier)type) {
+       case RCU_BARRIER_STD:
+               call_rcu(head, rcu_barrier_callback);
+               break;
+       case RCU_BARRIER_BH:
+               call_rcu_bh(head, rcu_barrier_callback);
+               break;
+       case RCU_BARRIER_SCHED:
+               call_rcu_sched(head, rcu_barrier_callback);
+               break;
+       }
 }
 
-/**
- * rcu_barrier - Wait until all the in-flight RCUs are complete.
+/*
+ * Orchestrate the specified type of RCU barrier, waiting for all
+ * RCU callbacks of the specified type to complete.
  */
-void rcu_barrier(void)
+static void _rcu_barrier(enum rcu_barrier type)
 {
        BUG_ON(in_interrupt());
        /* Take cpucontrol mutex to protect against CPU hotplug */
@@ -111,13 +128,39 @@ void rcu_barrier(void)
         * until all the callbacks are queued.
         */
        rcu_read_lock();
-       on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+       on_each_cpu(rcu_barrier_func, (void *)type, 0, 1);
        rcu_read_unlock();
        wait_for_completion(&rcu_barrier_completion);
        mutex_unlock(&rcu_barrier_mutex);
 }
+
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ */
+void rcu_barrier(void)
+{
+       _rcu_barrier(RCU_BARRIER_STD);
+}
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
+/**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+void rcu_barrier_bh(void)
+{
+       _rcu_barrier(RCU_BARRIER_BH);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+
+/**
+ * rcu_barrier_sched - Wait until all in-flight call_rcu_sched() callbacks complete.
+ */
+void rcu_barrier_sched(void)
+{
+       _rcu_barrier(RCU_BARRIER_SCHED);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+
 void __init rcu_init(void)
 {
        __rcu_init();
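
For completeness, an equally hypothetical sketch of the rcu_bh flavor (again,
not part of the patch): a teardown path that unpublishes an object read under
rcu_read_lock_bh() and frees it via call_rcu_bh() can now use rcu_barrier_bh()
to wait for that callback, and for any queued earlier, before tearing down the
rest of its state.  This is the kind of guarantee the rcutorture rcu_bh stop
path was missing.  The names bh_item, bh_slot, and bh_teardown are invented.

/*
 * Hypothetical sketch: unpublish an object that BH readers access,
 * queue its reclamation with call_rcu_bh(), then wait for all
 * pending call_rcu_bh() callbacks before tearing down further state.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bh_item {
	struct rcu_head rcu;
	int key;
};

static struct bh_item *bh_slot;

static void bh_item_free(struct rcu_head *head)
{
	kfree(container_of(head, struct bh_item, rcu));
}

static void bh_teardown(void)
{
	struct bh_item *old = bh_slot;

	/* Stop new BH readers from finding the item. */
	rcu_assign_pointer(bh_slot, NULL);
	if (old)
		call_rcu_bh(&old->rcu, bh_item_free);

	/* New API: block until all queued call_rcu_bh() callbacks finish. */
	rcu_barrier_bh();
}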