diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c9616e4..1cee04f 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -117,9 +117,9 @@ static void rcu_preempt_note_context_switch(int cpu)
                 * on line!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
-               phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
+               WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
+               phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
                list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
-               smp_mb();  /* Ensure later ctxt swtch seen after above. */
                spin_unlock_irqrestore(&rnp->lock, flags);
        }
 
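For reference, the rewritten phase computation selects which of the two blocked_tasks[] lists a preempted reader joins: the list for the current grace period when this CPU still owes a quiescent state (its bit is still set in rnp->qsmask), and the list for the next grace period otherwise. A minimal userspace sketch of that selection, using made-up gpnum/qsmask/grpmask values rather than the kernel's data structures:

#include <stdio.h>

/*
 * Illustrative model only, not kernel code: gpnum, qsmask and grpmask are
 * stand-in values.  A reader preempted inside an RCU read-side critical
 * section goes on blocked_tasks[gpnum & 1] (blocking the current grace
 * period) if its CPU has not yet passed through a quiescent state, and on
 * the other list (blocking the next grace period) otherwise.
 */
static int blocked_list_index(unsigned long gpnum, unsigned long qsmask,
			      unsigned long grpmask)
{
	return (gpnum + !(qsmask & grpmask)) & 0x1;
}

int main(void)
{
	/* CPU bit still set in qsmask: reader blocks the current GP. */
	printf("%d\n", blocked_list_index(4, 0x2, 0x2));	/* 4 & 1 == 0 */
	/* CPU already reported a quiescent state: reader blocks the next GP. */
	printf("%d\n", blocked_list_index(4, 0x0, 0x2));	/* (4 + 1) & 1 == 1 */
	return 0;
}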
@@ -133,7 +133,9 @@ static void rcu_preempt_note_context_switch(int cpu)
         * means that we continue to block the current grace period.
         */
        rcu_preempt_qs(cpu);
+       local_irq_save(flags);
        t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+       local_irq_restore(flags);
 }
 
 /*
@@ -189,10 +191,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
-                       spin_lock(&rnp->lock);
+                       spin_lock(&rnp->lock);  /* irqs already disabled. */
                        if (rnp == t->rcu_blocked_node)
                                break;
-                       spin_unlock(&rnp->lock);
+                       spin_unlock(&rnp->lock);  /* irqs remain disabled. */
                }
                empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
                list_del_init(&t->rcu_node_entry);
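The loop whose unlock now carries the "irqs remain disabled" comment is the usual read-the-pointer, lock-it, recheck idiom: t->rcu_blocked_node may change until that node's lock is held, so the pointer is re-read under the lock, and the lock is dropped and the loop retried if it moved. An illustrative userspace sketch of the same idiom, with hypothetical names and a pthread mutex standing in for the raw spinlock:

#include <pthread.h>

/* Toy types for the sketch; not the kernel's rcu_node/task_struct. */
struct node {
	pthread_mutex_t lock;
};

struct task {
	struct node *blocked_node;	/* may change until its lock is held */
};

/*
 * Lock the node the task is currently queued on.  If the task was moved to
 * a different node while the lock was being acquired, drop it and retry.
 */
static struct node *lock_blocked_node(struct task *t)
{
	struct node *np;

	for (;;) {
		np = t->blocked_node;
		pthread_mutex_lock(&np->lock);
		if (np == t->blocked_node)
			return np;	/* still the right node; lock held */
		pthread_mutex_unlock(&np->lock);
	}
}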
@@ -206,7 +208,8 @@ static void rcu_read_unlock_special(struct task_struct *t)
                 */
                if (!empty && rnp->qsmask == 0 &&
                    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
-                       t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+                       struct rcu_node *rnp_p;
+
                        if (rnp->parent == NULL) {
                                /* Only one rcu_node in the tree. */
                                cpu_quiet_msk_finish(&rcu_preempt_state, flags);
@@ -215,9 +218,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
                        /* Report up the rest of the hierarchy. */
                        mask = rnp->grpmask;
                        spin_unlock_irqrestore(&rnp->lock, flags);
-                       rnp = rnp->parent;
-                       spin_lock_irqsave(&rnp->lock, flags);
-                       cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags);
+                       rnp_p = rnp->parent;
+                       spin_lock_irqsave(&rnp_p->lock, flags);
+                       WARN_ON_ONCE(rnp->qsmask);
+                       cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags);
                        return;
                }
                spin_unlock(&rnp->lock);
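The branch above runs when the last reader blocking the current grace period on this rcu_node exits its critical section after every CPU covered by the node has already checked in (rnp->qsmask == 0): at the root it finishes the grace period directly, otherwise it clears this node's bit in its parent, which may propagate further up. A toy userspace model of that upward reporting, with hypothetical names rather than the kernel API:

#include <stdio.h>

/* Toy stand-in for the kernel's rcu_node. */
struct toy_node {
	unsigned long qsmask;		/* children still owing a report */
	unsigned long grpmask;		/* this node's bit in its parent */
	struct toy_node *parent;
};

/* Clear @mask in @rnp; if that empties the node, report to the parent. */
static void toy_report_qs(struct toy_node *rnp, unsigned long mask)
{
	for (;;) {
		rnp->qsmask &= ~mask;
		if (rnp->qsmask != 0 || rnp->parent == NULL)
			break;		/* still waiting, or reached the root */
		mask = rnp->grpmask;	/* propagate one level up */
		rnp = rnp->parent;
	}
	if (rnp->parent == NULL && rnp->qsmask == 0)
		printf("grace period complete\n");
}

int main(void)
{
	struct toy_node root = { .qsmask = 0x1 };
	struct toy_node leaf = { .qsmask = 0x3, .grpmask = 0x1, .parent = &root };

	toy_report_qs(&leaf, 0x1);	/* one CPU reports; root untouched */
	toy_report_qs(&leaf, 0x2);	/* last report propagates to the root */
	return 0;
}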
@@ -278,6 +282,7 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
        WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
+       WARN_ON_ONCE(rnp->qsmask);
 }
 
 /*
@@ -302,7 +307,8 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
  * The caller must hold rnp->lock with irqs disabled.
  */
 static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                     struct rcu_node *rnp)
+                                     struct rcu_node *rnp,
+                                     struct rcu_data *rdp)
 {
        int i;
        struct list_head *lp;
@@ -314,6 +320,9 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
                WARN_ONCE(1, "Last CPU thought to be offlined?");
                return;  /* Shouldn't happen: at least one CPU online. */
        }
+       WARN_ON_ONCE(rnp != rdp->mynode &&
+                    (!list_empty(&rnp->blocked_tasks[0]) ||
+                     !list_empty(&rnp->blocked_tasks[1])));
 
        /*
         * Move tasks up to root rcu_node.  Rely on the fact that the
@@ -361,9 +370,8 @@ static void rcu_preempt_check_callbacks(int cpu)
                rcu_preempt_qs(cpu);
                return;
        }
-       if (per_cpu(rcu_preempt_data, cpu).qs_pending) {
+       if (per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
-       }
 }
 
 /*
@@ -467,10 +475,12 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 
 /*
  * Because there is no preemptable RCU, there can be no readers blocked,
- * so there is no need to check for blocked tasks.
+ * so there is no need to check for blocked tasks.  So check only for
+ * bogus qsmask values.
  */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
+       WARN_ON_ONCE(rnp->qsmask);
 }
 
 /*
@@ -489,7 +499,8 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
  * tasks that were blocked within RCU read-side critical sections.
  */
 static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                     struct rcu_node *rnp)
+                                     struct rcu_node *rnp,
+                                     struct rcu_data *rdp)
 {
 }