Merge branch 'master' into percpu
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index d1614e8..b287b62 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -19,8 +19,8 @@
 #include <linux/delay.h>
 #include <linux/smp.h>
 
+#include <asm/perf_event.h>
 #include <asm/ptrace.h>
-#include <asm/local.h>
 #include <asm/pcr.h>
 
 /* We don't have a real NMI on sparc64, but we can fake one
@@ -31,7 +31,6 @@
  * level 14 as our IRQ off level.
  */
 
-static int nmi_watchdog_active;
 static int panic_on_timeout;
 
 /* nmi_active:
@@ -47,12 +46,12 @@ static DEFINE_PER_CPU(short, wd_enabled);
 static int endflag __initdata;
 
 static DEFINE_PER_CPU(unsigned int, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
 {
-       if (nmi_watchdog_active) {
+       if (atomic_read(&nmi_active)) {
                int cpu;
 
                for_each_present_cpu(cpu) {
@@ -85,6 +84,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
        if (do_panic || panic_on_oops)
                panic("Non maskable interrupt");
 
+       nmi_exit();
        local_irq_enable();
        do_exit(SIGBUS);
 }
@@ -95,13 +95,16 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
        int cpu = smp_processor_id();
 
        clear_softint(1 << irq);
-       pcr_ops->write(PCR_PIC_PRIV);
 
        local_cpu_data().__nmi_count++;
 
+       nmi_enter();
+
        if (notify_die(DIE_NMI, "nmi", regs, 0,
                       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
                touched = 1;
+       else
+               pcr_ops->write(PCR_PIC_PRIV);
 
        sum = kstat_irqs_cpu(0, cpu);
        if (__get_cpu_var(nmi_touch)) {
@@ -109,18 +112,20 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
                touched = 1;
        }
        if (!touched && __get_cpu_var(last_irq_sum) == sum) {
-               local_inc(&__get_cpu_var(alert_counter));
-               if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+               __this_cpu_inc(alert_counter);
+               if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
                        die_nmi("BUG: NMI Watchdog detected LOCKUP",
                                regs, panic_on_timeout);
        } else {
                __get_cpu_var(last_irq_sum) = sum;
-               local_set(&__get_cpu_var(alert_counter), 0);
+               __this_cpu_write(alert_counter, 0);
        }
        if (__get_cpu_var(wd_enabled)) {
                write_pic(picl_value(nmi_hz));
                pcr_ops->write(pcr_enable);
        }
+
+       nmi_exit();
 }
 
 static inline unsigned int get_nmi_count(int cpu)
@@ -152,7 +157,7 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
        atomic_dec(&nmi_active);
 }
 
-static void stop_nmi_watchdog(void *unused)
+void stop_nmi_watchdog(void *unused)
 {
        pcr_ops->write(PCR_PIC_PRIV);
        __get_cpu_var(wd_enabled) = 0;
@@ -206,7 +211,7 @@ error:
        return err;
 }
 
-static void start_nmi_watchdog(void *unused)
+void start_nmi_watchdog(void *unused)
 {
        __get_cpu_var(wd_enabled) = 1;
        atomic_inc(&nmi_active);
@@ -259,6 +264,9 @@ int __init nmi_init(void)
                        atomic_set(&nmi_active, -1);
                }
        }
+       if (!err)
+               init_hw_perf_events();
+
        return err;
 }