diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 326a8f4..8bd1bf9 100644
 #include <linux/kdebug.h>
 #include <linux/smp.h>
 
+#include <asm/i8259.h>
+#include <asm/io_apic.h>
+#include <asm/smp.h>
+#include <asm/nmi.h>
 #include <asm/proto.h>
 #include <asm/timer.h>
 
@@ -48,7 +52,7 @@ static cpumask_t backtrace_mask = CPU_MASK_NONE;
 atomic_t nmi_active = ATOMIC_INIT(0);          /* oprofile uses this */
 EXPORT_SYMBOL(nmi_active);
 
-unsigned int nmi_watchdog = NMI_DEFAULT;
+unsigned int nmi_watchdog = NMI_NONE;
 EXPORT_SYMBOL(nmi_watchdog);
 
 static int panic_on_timeout;
@@ -68,7 +72,7 @@ static inline unsigned int get_nmi_count(int cpu)
 
 static inline int mce_in_progress(void)
 {
-#if defined(CONFIX_X86_64) && defined(CONFIG_X86_MCE)
+#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
        return atomic_read(&mce_entry) > 0;
 #endif
        return 0;
@@ -88,14 +92,6 @@ static inline unsigned int get_timer_irqs(int cpu)
 #endif
 }
 
-/* Run after command line and cpu_init init, but before all other checks */
-void nmi_watchdog_default(void)
-{
-       if (nmi_watchdog != NMI_DEFAULT)
-               return;
-       nmi_watchdog = NMI_NONE;
-}
-
 #ifdef CONFIG_SMP
 /*
  * The performance counters used by NMI_LOCAL_APIC don't trigger when
@@ -118,18 +114,37 @@ static __init void nmi_cpu_busy(void *data)
 }
 #endif
 
+static void report_broken_nmi(int cpu, int *prev_nmi_count)
+{
+       printk(KERN_CONT "\n");
+
+       printk(KERN_WARNING
+               "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+                       cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
+
+       printk(KERN_WARNING
+               "Please report this to bugzilla.kernel.org,\n");
+       printk(KERN_WARNING
+               "and attach the output of the 'dmesg' command.\n");
+
+       per_cpu(wd_enabled, cpu) = 0;
+       atomic_dec(&nmi_active);
+}
+
+static void __acpi_nmi_disable(void *__unused)
+{
+       apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+}
+
 int __init check_nmi_watchdog(void)
 {
        unsigned int *prev_nmi_count;
        int cpu;
 
-       if (nmi_watchdog == NMI_NONE || nmi_watchdog == NMI_DISABLED)
-               return 0;
-
-       if (!atomic_read(&nmi_active))
+       if (!nmi_watchdog_active() || !atomic_read(&nmi_active))
                return 0;
 
-       prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
+       prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
        if (!prev_nmi_count)
                goto error;
 
@@ -137,7 +152,7 @@ int __init check_nmi_watchdog(void)
 
 #ifdef CONFIG_SMP
        if (nmi_watchdog == NMI_LOCAL_APIC)
-               smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+               smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 #endif
 
        for_each_possible_cpu(cpu)
@@ -148,15 +163,8 @@ int __init check_nmi_watchdog(void)
        for_each_online_cpu(cpu) {
                if (!per_cpu(wd_enabled, cpu))
                        continue;
-               if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
-                       printk(KERN_WARNING "WARNING: CPU#%d: NMI "
-                               "appears to be stuck (%d->%d)!\n",
-                               cpu,
-                               prev_nmi_count[cpu],
-                               get_nmi_count(cpu));
-                       per_cpu(wd_enabled, cpu) = 0;
-                       atomic_dec(&nmi_active);
-               }
+               if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
+                       report_broken_nmi(cpu, prev_nmi_count);
        }
        endflag = 1;
        if (!atomic_read(&nmi_active)) {
@@ -175,17 +183,22 @@ int __init check_nmi_watchdog(void)
 
        kfree(prev_nmi_count);
        return 0;
-
 error:
+       if (nmi_watchdog == NMI_IO_APIC) {
+               if (!timer_through_8259)
+                       disable_8259A_irq(0);
+               on_each_cpu(__acpi_nmi_disable, NULL, 1);
+       }
+
 #ifdef CONFIG_X86_32
-       timer_ack = !cpu_has_tsc;
+       timer_ack = 0;
 #endif
        return -1;
 }
 
 static int __init setup_nmi_watchdog(char *str)
 {
-       int nmi;
+       unsigned int nmi;
 
        if (!strncmp(str, "panic", 5)) {
                panic_on_timeout = 1;
@@ -195,12 +208,17 @@ static int __init setup_nmi_watchdog(char *str)
                ++str;
        }
 
-       get_option(&str, &nmi);
-
-       if (nmi >= NMI_INVALID || nmi < NMI_NONE)
-               return 0;
+       if (!strncmp(str, "lapic", 5))
+               nmi_watchdog = NMI_LOCAL_APIC;
+       else if (!strncmp(str, "ioapic", 6))
+               nmi_watchdog = NMI_IO_APIC;
+       else {
+               get_option(&str, &nmi);
+               if (nmi >= NMI_INVALID)
+                       return 0;
+               nmi_watchdog = nmi;
+       }
 
-       nmi_watchdog = nmi;
        return 1;
 }
 __setup("nmi_watchdog=", setup_nmi_watchdog);
@@ -269,7 +287,7 @@ late_initcall(init_lapic_nmi_sysfs);
 
 static void __acpi_nmi_enable(void *__unused)
 {
-       apic_write_around(APIC_LVT0, APIC_DM_NMI);
+       apic_write(APIC_LVT0, APIC_DM_NMI);
 }
 
 /*
@@ -278,12 +296,7 @@ static void __acpi_nmi_enable(void *__unused)
 void acpi_nmi_enable(void)
 {
        if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-               on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
-}
-
-static void __acpi_nmi_disable(void *__unused)
-{
-       apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+               on_each_cpu(__acpi_nmi_enable, NULL, 1);
 }
 
 /*
@@ -292,7 +305,16 @@ static void __acpi_nmi_disable(void *__unused)
 void acpi_nmi_disable(void)
 {
        if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-               on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+               on_each_cpu(__acpi_nmi_disable, NULL, 1);
+}
+
+/*
+ * This function is called as soon as the LAPIC NMI watchdog driver has everything
+ * in place and it's ready to check if the NMIs belong to the NMI watchdog
+ */
+void cpu_nmi_set_wd_enabled(void)
+{
+       __get_cpu_var(wd_enabled) = 1;
 }
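For context, a minimal sketch of the call ordering this new hook implies on the
LAPIC path; the example_* helpers below are hypothetical, only
cpu_nmi_set_wd_enabled() and wd_enabled appear in this patch:

    static int example_lapic_setup(unsigned nmi_hz)       /* hypothetical */
    {
            if (example_program_perfctr(nmi_hz) < 0)       /* hypothetical */
                    return -1;

            /*
             * Mark the watchdog enabled before the counter can fire, so an
             * NMI arriving right after the unmask is attributed to the
             * watchdog instead of being reported as unknown.
             */
            cpu_nmi_set_wd_enabled();

            example_unmask_counter();                      /* hypothetical */
            return 0;
    }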
 
 void setup_apic_nmi_watchdog(void *unused)
@@ -307,8 +329,6 @@ void setup_apic_nmi_watchdog(void *unused)
 
        switch (nmi_watchdog) {
        case NMI_LOCAL_APIC:
-                /* enable it before to avoid race with handler */
-               __get_cpu_var(wd_enabled) = 1;
                if (lapic_watchdog_init(nmi_hz) < 0) {
                        __get_cpu_var(wd_enabled) = 0;
                        return;
@@ -323,13 +343,14 @@ void setup_apic_nmi_watchdog(void *unused)
 void stop_apic_nmi_watchdog(void *unused)
 {
        /* only support LOCAL and IO APICs for now */
-       if (nmi_watchdog != NMI_LOCAL_APIC &&
-           nmi_watchdog != NMI_IO_APIC)
+       if (!nmi_watchdog_active())
                return;
        if (__get_cpu_var(wd_enabled) == 0)
                return;
        if (nmi_watchdog == NMI_LOCAL_APIC)
                lapic_watchdog_stop();
+       else
+               __acpi_nmi_disable(NULL);
        __get_cpu_var(wd_enabled) = 0;
        atomic_dec(&nmi_active);
 }
@@ -354,8 +375,7 @@ static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
 {
-       if (nmi_watchdog == NMI_LOCAL_APIC ||
-               nmi_watchdog == NMI_IO_APIC) {
+       if (nmi_watchdog_active()) {
                unsigned cpu;
 
                /*
@@ -456,6 +476,31 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 
 #ifdef CONFIG_SYSCTL
 
+static void enable_ioapic_nmi_watchdog_single(void *unused)
+{
+       __get_cpu_var(wd_enabled) = 1;
+       atomic_inc(&nmi_active);
+       __acpi_nmi_enable(NULL);
+}
+
+static void enable_ioapic_nmi_watchdog(void)
+{
+       on_each_cpu(enable_ioapic_nmi_watchdog_single, NULL, 1);
+       touch_nmi_watchdog();
+}
+
+static void disable_ioapic_nmi_watchdog(void)
+{
+       on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
+}
+
+static int __init setup_unknown_nmi_panic(char *str)
+{
+       unknown_nmi_panic = 1;
+       return 1;
+}
+__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
+
 static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
 {
        unsigned char reason = get_nmi_reason();
@@ -480,29 +525,22 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
        if (!!old_state == !!nmi_watchdog_enabled)
                return 0;
 
-       if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
+       if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
                printk(KERN_WARNING
                        "NMI watchdog is permanently disabled\n");
                return -EIO;
        }
 
-       /* if nmi_watchdog is not set yet, then set it */
-       nmi_watchdog_default();
-
-#ifdef CONFIG_X86_32
-       if (nmi_watchdog == NMI_NONE) {
-               if (lapic_watchdog_ok())
-                       nmi_watchdog = NMI_LOCAL_APIC;
-               else
-                       nmi_watchdog = NMI_IO_APIC;
-       }
-#endif
-
        if (nmi_watchdog == NMI_LOCAL_APIC) {
                if (nmi_watchdog_enabled)
                        enable_lapic_nmi_watchdog();
                else
                        disable_lapic_nmi_watchdog();
+       } else if (nmi_watchdog == NMI_IO_APIC) {
+               if (nmi_watchdog_enabled)
+                       enable_ioapic_nmi_watchdog();
+               else
+                       disable_ioapic_nmi_watchdog();
        } else {
                printk(KERN_WARNING
                        "NMI watchdog doesn't know what hardware to touch\n");