sched: Move update_curr() in check_preempt_wakeup() to avoid redundant call
diff --git a/kernel/softirq.c b/kernel/softirq.c
index eb5e131..21939d9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -57,7 +57,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 char *softirq_to_name[NR_SOFTIRQS] = {
-       "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
+       "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER",  "RCU"
 };
 
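This hunk keeps softirq_to_name[] in sync with the softirq enum after the BLOCK_IOPOLL softirq was introduced: the array is indexed by softirq number, so a missing entry would shift every name after "BLOCK" off by one. For reference, the corresponding enum in include/linux/interrupt.h of this vintage looked approximately like this (a sketch from memory, not copied from the tree):

	enum
	{
		HI_SOFTIRQ = 0,
		TIMER_SOFTIRQ,
		NET_TX_SOFTIRQ,
		NET_RX_SOFTIRQ,
		BLOCK_SOFTIRQ,
		BLOCK_IOPOLL_SOFTIRQ,	/* the new entry this hunk accounts for */
		TASKLET_SOFTIRQ,
		SCHED_SOFTIRQ,
		HRTIMER_SOFTIRQ,
		RCU_SOFTIRQ,		/* RCU is expected to stay last */

		NR_SOFTIRQS
	};
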
@@ -227,7 +227,7 @@ restart:
                                preempt_count() = prev_count;
                        }
 
-                       rcu_bh_qsctr_inc(cpu);
+                       rcu_bh_qs(cpu);
                }
                h++;
                pending >>= 1;
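In this __do_softirq() loop, rcu_bh_qsctr_inc() becomes rcu_bh_qs() as part of an RCU naming cleanup (the sched-side counterpart appears in the ksoftirqd hunk below). The call's job is unchanged: once a softirq handler has completed, this CPU can no longer be inside an rcu_read_lock_bh() section, so it reports a bh quiescent state. A minimal sketch of what such a helper records, assuming the tree-RCU per-CPU rcu_data layout of the period (field names approximate, not authoritative):

	void rcu_bh_qs(int cpu)
	{
		struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);

		/* Remember which grace period this quiescent state applies to. */
		rdp->passed_quiesc_completed = rdp->gpnum - 1;
		barrier();	/* order the two stores on the softirq path */
		/* Flag the quiescent state; noticed on a later scheduler tick. */
		rdp->passed_quiesc = 1;
	}
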
@@ -302,9 +302,9 @@ void irq_exit(void)
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();
 
+       rcu_irq_exit();
 #ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
-       rcu_irq_exit();
        if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
                tick_nohz_stop_sched_tick(0);
 #endif
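Hoisting rcu_irq_exit() above the #ifdef makes RCU's interrupt-exit hook run on every irq_exit(), not only on CONFIG_NO_HZ builds, where it previously sat. With the change applied, the function reads approximately as follows (surrounding lines reconstructed from kernels of this era, so treat it as a sketch):

	void irq_exit(void)
	{
		account_system_vtime(current);
		trace_hardirq_exit();
		sub_preempt_count(IRQ_EXIT_OFFSET);
		if (!in_interrupt() && local_softirq_pending())
			invoke_softirq();

		rcu_irq_exit();		/* now unconditional */
	#ifdef CONFIG_NO_HZ
		/* Make sure that timer wheel updates are propagated */
		if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
			tick_nohz_stop_sched_tick(0);
	#endif
		preempt_enable_no_resched();
	}
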
@@ -721,7 +721,7 @@ static int ksoftirqd(void * __bind_cpu)
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
-                       rcu_qsctr_inc((long)__bind_cpu);
+                       rcu_sched_qs((long)__bind_cpu);
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
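
Same cleanup as in __do_softirq() above, for the sched flavor: the cond_resched() in this loop is itself a quiescent point, and the explicit call reports it to RCU so grace periods keep advancing while ksoftirqd stays busy. Taken together, the two renames visible in this diff are:

	/*
	 * rcu_qsctr_inc(cpu)     ->  rcu_sched_qs(cpu)  (RCU-sched quiescent state)
	 * rcu_bh_qsctr_inc(cpu)  ->  rcu_bh_qs(cpu)     (RCU-bh quiescent state)
	 */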