tracing/power: move the power trace headers to a dedicated file
diff --git a/kernel/softirq.c b/kernel/softirq.c
index daf4635..6edfc2c 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,6 +21,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/rcupdate.h>
+#include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/tick.h>
 
@@ -79,13 +80,23 @@ static void __local_bh_disable(unsigned long ip)
        WARN_ON_ONCE(in_irq());
 
        raw_local_irq_save(flags);
-       add_preempt_count(SOFTIRQ_OFFSET);
+       /*
+        * The preempt tracer hooks into add_preempt_count and will break
+        * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
+        * is set and before current->softirq_enabled is cleared.
+        * We must manually increment preempt_count here and manually
+        * call the trace_preempt_off later.
+        */
+       preempt_count() += SOFTIRQ_OFFSET;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
+
+       if (preempt_count() == SOFTIRQ_OFFSET)
+               trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 #else /* !CONFIG_TRACE_IRQFLAGS */
 static inline void __local_bh_disable(unsigned long ip)
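The first hunk above open-codes the SOFTIRQ_OFFSET increment instead of
calling add_preempt_count(): the preempt tracer hooks add_preempt_count()
and would call back into lockdep at a point where the softirq accounting
is only half done. A sketch of the broken call chain the new comment
describes (reconstructed for illustration, not taken from the patch):

        /*
         *   __local_bh_disable()
         *     add_preempt_count(SOFTIRQ_OFFSET)
         *       trace_preempt_off()        <- tracer hook fires with
         *         ... lockdep ...             SOFTIRQ_OFFSET already set but
         *                                     trace_softirqs_off() not yet
         *                                     called, so lockdep observes an
         *                                     inconsistent softirq state
         */

With the raw preempt_count() bump no hook fires; trace_preempt_off() is
then invoked by hand only after trace_softirqs_off() has brought lockdep
up to date and interrupts have been restored.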
@@ -102,20 +113,6 @@ void local_bh_disable(void)
 
 EXPORT_SYMBOL(local_bh_disable);
 
-void __local_bh_enable(void)
-{
-       WARN_ON_ONCE(in_irq());
-
-       /*
-        * softirqs should never be enabled by __local_bh_enable(),
-        * it always nests inside local_bh_enable() sections:
-        */
-       WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
-
-       sub_preempt_count(SOFTIRQ_OFFSET);
-}
-EXPORT_SYMBOL_GPL(__local_bh_enable);
-
 /*
  * Special-case - softirqs can safely be enabled in
  * cond_resched_softirq(), or by __do_softirq(),
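The hunk above deletes __local_bh_enable() together with its GPL export;
it was a thin WARN-guarded wrapper around sub_preempt_count(SOFTIRQ_OFFSET),
and dropping the export suggests its remaining callers were converted. The
surviving comment introduces _local_bh_enable(), the sibling helper this
file keeps for the special cases it names. A hypothetical caller might
look like this (a sketch only; note that _local_bh_enable() warns unless
interrupts are already disabled):

        local_irq_disable();
        local_bh_disable();
        /* ... per-CPU work that must not race with softirqs ... */
        _local_bh_enable();     /* drop SOFTIRQ_OFFSET, run no softirqs */
        local_irq_enable();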
@@ -269,6 +266,7 @@ void irq_enter(void)
 {
        int cpu = smp_processor_id();
 
+       rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
                __irq_enter();
                tick_check_idle(cpu);
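irq_enter() now calls rcu_irq_enter() unconditionally and before the
idle/tick checks. The intended ordering, sketched (annotation, not patch
text):

        /*
         *   irq_enter()
         *     rcu_irq_enter()      <- RCU told first: CPU leaves dynticks-idle
         *     idle_cpu() && !in_interrupt()?
         *       __irq_enter()
         *       tick_check_idle()  <- may rely on RCU, which is only safe
         *                             once rcu_irq_enter() has run
         */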
@@ -295,9 +293,9 @@ void irq_exit(void)
 
 #ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
-       if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
-               tick_nohz_stop_sched_tick(0);
        rcu_irq_exit();
+       if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
+               tick_nohz_stop_sched_tick(0);
 #endif
        preempt_enable_no_resched();
 }
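The exit path is reordered to match: rcu_irq_exit() moves ahead of the
nohz check, so the decision to stop the tick is made with RCU's dynticks
bookkeeping already up to date (tick_nohz_stop_sched_tick() consults RCU,
e.g. via rcu_needs_cpu(), before letting the CPU go tickless). Sketch of
the resulting order:

        /*
         *   irq_exit()
         *     rcu_irq_exit()                  <- RCU bookkeeping updated first
         *     idle && !in_interrupt() && !need_resched()?
         *       tick_nohz_stop_sched_tick(0)  <- sees current RCU state
         */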
@@ -746,7 +744,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
-                            any_online_cpu(cpu_online_map));
+                            cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
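The last hunk is part of the cpumask API conversion: the old
any_online_cpu() macro operated on a cpumask_t object, while cpumask_any()
takes a const struct cpumask pointer, part of the effort to stop copying
full cpumask_t values around as NR_CPUS grows. The two spellings pick an
arbitrary online CPU equivalently (illustration only):

        int cpu;

        cpu = any_online_cpu(cpu_online_map);   /* old: cpumask_t object  */
        cpu = cpumask_any(cpu_online_mask);     /* new: struct cpumask *  */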