perf_event: Clean up __perf_event_init_context()
[safe/jmp/linux-2.6] / kernel / rcutree.c
index d8d9865..f3077c0 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -59,7 +59,7 @@
                NUM_RCU_LVL_2, \
                NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
        }, \
-       .signaled = RCU_SIGNAL_INIT, \
+       .signaled = RCU_GP_IDLE, \
        .gpnum = -300, \
        .completed = -300, \
        .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
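
This initializer hunk, together with the rcu_start_gp() and cpu_quiet_msk_finish() hunks below, splits "no grace period in progress" (RCU_GP_IDLE) off from "grace period being initialized", so ->signaled starts out idle rather than claiming a grace period is already under way. A minimal userspace sketch of the resulting lifecycle; the state names come from this diff, but the values and the CONFIG_NO_HZ aliasing of RCU_SIGNAL_INIT are modeled on rcutree.h of this era rather than shown here:

#include <assert.h>
#include <stdio.h>

/* States modeled on kernel/rcutree.h of this era; values illustrative. */
enum gp_state {
        RCU_GP_IDLE,            /* no grace period in progress */
        RCU_GP_INIT,            /* grace period being initialized */
        RCU_SAVE_DYNTICK,       /* need to scan dyntick state */
        RCU_FORCE_QS,           /* need to force quiescent states */
};
#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK        /* CONFIG_NO_HZ case */

int main(void)
{
        enum gp_state signaled = RCU_GP_IDLE;   /* new initial state */

        signaled = RCU_GP_INIT;         /* rcu_start_gp(), set before the
                                           breadth-first hierarchy scan
                                           (above the hunk shown below) */
        signaled = RCU_SIGNAL_INIT;     /* force_quiescent_state now OK */
        assert(signaled == RCU_SAVE_DYNTICK);
        signaled = RCU_FORCE_QS;        /* force_quiescent_state() */
        signaled = RCU_GP_IDLE;         /* cpu_quiet_msk_finish() */
        printf("lifecycle complete, state=%d (RCU_GP_IDLE)\n", signaled);
        return 0;
}
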
@@ -657,14 +657,17 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
         * irqs disabled.
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
-               spin_lock(&rnp->lock);  /* irqs already disabled. */
+               spin_lock(&rnp->lock);          /* irqs already disabled. */
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
                rnp->gpnum = rsp->gpnum;
-               spin_unlock(&rnp->lock);        /* irqs already disabled. */
+               spin_unlock(&rnp->lock);        /* irqs remain disabled. */
        }
 
+       rnp = rcu_get_root(rsp);
+       spin_lock(&rnp->lock);                  /* irqs already disabled. */
        rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+       spin_unlock(&rnp->lock);                /* irqs remain disabled. */
        spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
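The newly taken root rcu_node lock makes ->signaled single-writer-under-lock: force_quiescent_state() switches on a snapshot of ->signaled taken before it drops that same root lock (visible in the switch hunk further down), so it can no longer act on a grace period whose hierarchy initialization is still mid-flight. A toy pthread model of just that locking rule; the thread bodies and names are invented for illustration:

#include <pthread.h>
#include <stdio.h>

/* Toy rule: ->signaled may only be read or written under root_lock. */
static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static int signaled;    /* 0 = idle, 1 = initializing, 2 = forcing OK */

static void *start_gp(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&root_lock);
        signaled = 1;                   /* grace period initializing */
        pthread_mutex_unlock(&root_lock);

        /* ... breadth-first rcu_node initialization happens here ... */

        pthread_mutex_lock(&root_lock);
        signaled = 2;                   /* force_quiescent_state now OK */
        pthread_mutex_unlock(&root_lock);
        return NULL;
}

static void *force_qs(void *unused)
{
        int snap;

        (void)unused;
        pthread_mutex_lock(&root_lock);
        snap = signaled;                /* consistent snapshot */
        pthread_mutex_unlock(&root_lock);
        if (snap < 2)
                printf("gp idle or initializing, ignore\n");
        else
                printf("safe to force quiescent states\n");
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, start_gp, NULL);
        pthread_create(&t2, NULL, force_qs, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}
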
@@ -706,6 +709,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
        rsp->completed = rsp->gpnum;
+       rsp->signaled = RCU_GP_IDLE;
        rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
        rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
 }
@@ -913,7 +917,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
                        spin_unlock(&rnp->lock); /* irqs remain disabled. */
                        break;
                }
-               rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+               /*
+                * If there was a task blocking the current grace period,
+                * and if all CPUs have checked in, we need to propagate
+                * the quiescent state up the rcu_node hierarchy.  But that
+                * is inconvenient at the moment due to deadlock issues if
+                * this should end the current grace period.  So set the
+                * offlined CPU's bit in ->qsmask in order to force the
+                * next force_quiescent_state() invocation to clean up this
+                * mess in a deadlock-free manner.
+                */
+               if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+                       rnp->qsmask |= mask;
+
                mask = rnp->grpmask;
                spin_unlock(&rnp->lock);        /* irqs remain disabled. */
                rnp = rnp->parent;
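
The new branch leans on an invariant of the hierarchy: a node reports quiescence upward only once its ->qsmask empties. Leaving the departing CPU's bit set keeps the leaf formally non-quiescent, so the next force_quiescent_state() pass, which already walks the tree with safe lock ordering, ends the grace period instead of this offline path trying to do so under awkward locks. (In the surrounding function, `mask` enters the loop as the outgoing CPU's ->grpmask bit.) A toy two-level model of the bit bookkeeping; the structure is illustrative, not the kernel's:

#include <stdio.h>

/* Toy two-level hierarchy: one root, one leaf. Not the kernel's layout. */
struct node {
        unsigned long qsmask;           /* groups still to check in */
        unsigned long qsmaskinit;       /* ...as of grace-period start */
        unsigned long grpmask;          /* this node's bit in its parent */
        int blocked;                    /* rcu_preempt_offline_tasks() != 0 */
        struct node *parent;
};

/* Mirrors the loop in __rcu_offline_cpu(); mask starts as the CPU's bit. */
static void offline_cpu(struct node *rnp, unsigned long mask)
{
        do {
                rnp->qsmaskinit &= ~mask;
                if (rnp->qsmaskinit != 0)
                        break;          /* other CPUs/groups remain below */
                /*
                 * All CPUs here checked in but a task still blocks the
                 * grace period: leave the outgoing CPU's bit set so the
                 * next force_quiescent_state() cleans up deadlock-free.
                 */
                if (rnp->blocked && !rnp->qsmask)
                        rnp->qsmask |= mask;
                mask = rnp->grpmask;
                rnp = rnp->parent;
        } while (rnp != NULL);
}

int main(void)
{
        struct node root = { .qsmask = 0x1, .qsmaskinit = 0x1 };
        struct node leaf = { .qsmask = 0x0, .qsmaskinit = 0x2,
                             .grpmask = 0x1, .blocked = 1, .parent = &root };

        offline_cpu(&leaf, 0x2);        /* CPU with bit 0x2 goes offline */
        printf("leaf.qsmask = %#lx (left for fqs to clear)\n", leaf.qsmask);
        return 0;
}
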
@@ -958,7 +975,7 @@ static void rcu_offline_cpu(int cpu)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period.  Throttle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
        unsigned long flags;
        struct rcu_head *next, *list, **tail;
@@ -1011,6 +1028,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
        if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;
 
+       /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
+       if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+               rdp->qlen_last_fqs_check = 0;
+               rdp->n_force_qs_snap = rsp->n_force_qs;
+       } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+               rdp->qlen_last_fqs_check = rdp->qlen;
+
        local_irq_restore(flags);
 
        /* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1142,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
        }
        spin_unlock(&rnp->lock);
        switch (signaled) {
+       case RCU_GP_IDLE:
        case RCU_GP_INIT:
 
-               break; /* grace period still initializing, ignore. */
+               break; /* grace period idle or initializing, ignore. */
 
        case RCU_SAVE_DYNTICK:
 
@@ -1158,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
                /* Update state, record completion counter. */
                spin_lock(&rnp->lock);
-               if (lastcomp == rsp->completed) {
+               if (lastcomp == rsp->completed &&
+                   rsp->signaled == RCU_SAVE_DYNTICK) {
                        rsp->signaled = RCU_FORCE_QS;
                        dyntick_record_completed(rsp, lastcomp);
                }
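
The added `rsp->signaled == RCU_SAVE_DYNTICK` test re-validates the phase once the root lock, dropped for the dyntick scan, is retaken: with RCU_GP_IDLE now reachable (see the new case label above), a forcing pass that raced with the end of a grace period must not push the state machine to RCU_FORCE_QS on stale information, even if ->completed happens to match its snapshot. In effect this is a lock-protected compare-and-transition; a minimal sketch with invented helper names:

#include <pthread.h>
#include <stdio.h>

enum { GP_IDLE, GP_INIT, SAVE_DYNTICK, FORCE_QS };

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static int state = SAVE_DYNTICK;
static long completed = 42;

/*
 * Advance SAVE_DYNTICK -> FORCE_QS only if nothing changed while the
 * lock was dropped: same grace period AND still in the phase we left.
 */
static int try_advance(long lastcomp)
{
        int advanced = 0;

        pthread_mutex_lock(&root_lock);
        if (lastcomp == completed && state == SAVE_DYNTICK) {
                state = FORCE_QS;
                advanced = 1;
        }
        pthread_mutex_unlock(&root_lock);
        return advanced;
}

int main(void)
{
        long snap = completed;

        /* Simulate the phase changing while the lock was dropped. */
        pthread_mutex_lock(&root_lock);
        state = GP_IDLE;
        pthread_mutex_unlock(&root_lock);

        printf("advanced = %d (stale pass correctly refused)\n",
               try_advance(snap));
        return 0;
}
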
@@ -1224,7 +1250,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
        }
 
        /* If there are callbacks ready, invoke them. */
-       rcu_do_batch(rdp);
+       rcu_do_batch(rsp, rdp);
 }
 
 /*
@@ -1288,10 +1314,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
                rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
        }
 
-       /* Force the grace period if too many callbacks or too long waiting. */
-       if (unlikely(++rdp->qlen > qhimark)) {
+       /*
+        * Force the grace period if too many callbacks or too long waiting.
+        * Enforce hysteresis, and don't invoke force_quiescent_state()
+        * if some other CPU has recently done so.  Also, don't bother
+        * invoking force_quiescent_state() if the newly enqueued callback
+        * is the only one waiting for a grace period to complete.
+        */
+       if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
                rdp->blimit = LONG_MAX;
-               force_quiescent_state(rsp, 0);
+               if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+                   *rdp->nxttail[RCU_DONE_TAIL] != head)
+                       force_quiescent_state(rsp, 0);
+               rdp->n_force_qs_snap = rsp->n_force_qs;
+               rdp->qlen_last_fqs_check = rdp->qlen;
        } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
                force_quiescent_state(rsp, 1);
        local_irq_restore(flags);
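
Taken together with the rcu_do_batch() hunk above, this turns the queue-length trigger into hysteresis: a CPU attempts force_quiescent_state() at most once per additional qhimark callbacks (or after its queue drains), and skips the call when another CPU has forced quiescent states since its last ->n_force_qs snapshot. A userspace model of both halves; the field names mirror the diff, the driver loop is invented, and the single-callback `nxttail[RCU_DONE_TAIL]` refinement is omitted:

#include <stdio.h>

#define qhimark 10000L

static long n_force_qs;                 /* global: # of forcing passes */

struct cbstate {                        /* per-CPU, mirrors rcu_data */
        long qlen;                      /* callbacks queued */
        long qlen_last_fqs_check;       /* ->qlen at last trigger check */
        long n_force_qs_snap;           /* ->n_force_qs at last trigger */
        long forced;                    /* model: how often we forced */
};

static void call_rcu_model(struct cbstate *s)
{
        if (++s->qlen > s->qlen_last_fqs_check + qhimark) {
                if (n_force_qs == s->n_force_qs_snap) {
                        n_force_qs++;   /* force_quiescent_state() */
                        s->forced++;
                }
                s->n_force_qs_snap = n_force_qs;
                s->qlen_last_fqs_check = s->qlen;
        }
}

static void batch_done_model(struct cbstate *s)
{
        s->qlen = 0;                    /* pretend all callbacks drained */
        if (s->qlen_last_fqs_check != 0) {
                s->qlen_last_fqs_check = 0;
                s->n_force_qs_snap = n_force_qs;
        }
}

int main(void)
{
        struct cbstate s = { 0 };
        long i;

        for (i = 0; i < 5 * qhimark; i++)       /* burst of call_rcu()s */
                call_rcu_model(&s);
        printf("%ld enqueues -> %ld forcing passes\n", 5 * qhimark, s.forced);
        batch_done_model(&s);
        printf("after drain, trigger re-armed at qlen %ld\n",
               s.qlen_last_fqs_check);
        return 0;
}

For the modeled burst this yields four forcing attempts, where the old `++rdp->qlen > qhimark` test would have attempted one on each of the last 40,000 enqueues.
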
@@ -1523,6 +1559,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
        rdp->beenonline = 1;     /* We have now been online. */
        rdp->preemptable = preemptable;
        rdp->passed_quiesc_completed = lastcomp - 1;
+       rdp->qlen_last_fqs_check = 0;
+       rdp->n_force_qs_snap = rsp->n_force_qs;
        rdp->blimit = blimit;
        spin_unlock(&rnp->lock);                /* irqs remain disabled. */
 
@@ -1647,7 +1685,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                cpustride *= rsp->levelspread[i];
                rnp = rsp->level[i];
                for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
-                       spin_lock_init(&rnp->lock);
+                       if (rnp != rcu_get_root(rsp))
+                               spin_lock_init(&rnp->lock);
                        rnp->gpnum = 0;
                        rnp->qsmask = 0;
                        rnp->qsmaskinit = 0;
@@ -1670,6 +1709,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                        INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
                }
        }
+       spin_lock_init(&rcu_get_root(rsp)->lock);
 }
 
 /*
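
A final note on the rcu_init_one() reshuffle: since rcu_get_root(rsp) is the hierarchy's first node (node[0] in rcutree.h), the guard skips exactly one spin_lock_init() inside the scan and performs it once after the loop, making the root's lock, which the hunks above also use to guard ->signaled, the last piece of hierarchy state to be initialized. That reading of the intent is an inference from this diff, not from a changelog. A toy version of the pattern:

#include <pthread.h>
#include <stdio.h>

#define NUM_NODES 4

struct node {
        pthread_mutex_t lock;
};

static struct node nodes[NUM_NODES];

static struct node *get_root(void)
{
        return &nodes[0];       /* mirrors rcu_get_root(): node[0] */
}

int main(void)
{
        int i;

        /* Initialize every lock but the root's inside the scan... */
        for (i = 0; i < NUM_NODES; i++)
                if (&nodes[i] != get_root())
                        pthread_mutex_init(&nodes[i].lock, NULL);
        /* ...and the root's lock last, once the rest is consistent. */
        pthread_mutex_init(&get_root()->lock, NULL);

        printf("all %d node locks initialized, root last\n", NUM_NODES);
        return 0;
}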