include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 892e7c3..d801210 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
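The commit subject explains the one-line include change further down: once
slab.h/percpu.h stop dragging gfp.h in implicitly, every file that uses gfp
flags or the page allocator has to include <linux/gfp.h> itself. A minimal
sketch of the failure mode this avoids (the helper below is hypothetical,
purely for illustration):

	#include <linux/gfp.h>	/* GFP_ATOMIC, __get_free_page() */

	/* Hypothetical helper: without the explicit include above, this
	 * compiles only while some other header happens to pull gfp.h in
	 * indirectly -- exactly the dependency this cleanup removes. */
	static unsigned long *alloc_scratch_page(void)
	{
		return (unsigned long *)__get_free_page(GFP_ATOMIC);
	}
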
@@ -2,7 +2,7 @@
  *     Intel SMP support routines.
  *
  *     (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- *     (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ *     (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *      (c) 2002,2003 Andi Kleen, SuSE Labs.
  *
  *     i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/gfp.h>
 
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
-#include <mach_ipi.h>
-#include <asm/genapic.h>
+#include <asm/apic.h>
 /*
  *     Some notes on x86 processor bugs affecting SMP operation:
  *
@@ -151,14 +151,40 @@ void native_send_call_func_ipi(const struct cpumask *mask)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
+asmlinkage void smp_reboot_interrupt(void)
+{
+       ack_APIC_irq();
+       irq_enter();
+       stop_this_cpu(NULL);
+       irq_exit();
+}
+
 static void native_smp_send_stop(void)
 {
        unsigned long flags;
+       unsigned long wait;
 
        if (reboot_force)
                return;
 
-       smp_call_function(stop_this_cpu, NULL, 0);
+       /*
+        * Use our own vector here because smp_call_function
+        * does lots of things not suitable in a panic situation.
+        * On most systems we could also use an NMI here,
+        * but there are a few systems around where NMI
+        * is problematic, so stay with a non-NMI vector for now
+        * (this implies we currently cannot stop CPUs that are
+        * spinning with irqs off).
+        */
+       if (num_online_cpus() > 1) {
+               apic->send_IPI_allbutself(REBOOT_VECTOR);
+
+               /* Don't wait longer than a second */
+               wait = USEC_PER_SEC;
+               while (num_online_cpus() > 1 && wait--)
+                       udelay(1);
+       }
+
        local_irq_save(flags);
        disable_local_APIC();
        local_irq_restore(flags);
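
The new smp_reboot_interrupt() handler above is only half of the mechanism:
the vector also has to be reserved and routed to the handler elsewhere in
the tree. A sketch of that companion wiring, assuming the REBOOT_VECTOR
value and the irqinit/entry_64.S conventions of the x86 tree of this era
(none of this is part of the hunk above):

	/* arch/x86/include/asm/irq_vectors.h (assumed value) */
	#define REBOOT_VECTOR		0xf8

	/* arch/x86/kernel/irqinit.c: point an IDT entry at the stub */
	alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);

	/*
	 * arch/x86/kernel/entry_64.S: low-level stub that lands in
	 * smp_reboot_interrupt():
	 *
	 *	apicinterrupt REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt
	 */

With that in place, native_smp_send_stop() gives the other CPUs up to
USEC_PER_SEC iterations of udelay(1), i.e. roughly one second, to take
themselves offline before it disables the local APIC.
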
@@ -173,6 +199,9 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 {
        ack_APIC_irq();
        inc_irq_stat(irq_resched_count);
+       /*
+        * KVM uses this interrupt to force a CPU out of guest mode.
+        */
 }
 
 void smp_call_function_interrupt(struct pt_regs *regs)
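
The reschedule handler can stay this small because the IPI itself does the
work: its arrival forces a VM exit on a CPU that is executing a KVM guest,
and on bare metal the reschedule check runs on the return-from-interrupt
path. For context, the sending side elsewhere in this file looks roughly
like the sketch below (reconstructed from the same era of the tree, not
part of this diff):

	static void native_smp_send_reschedule(int cpu)
	{
		if (unlikely(cpu_is_offline(cpu))) {
			WARN_ON(1);
			return;
		}
		apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
	}
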
@@ -194,19 +223,19 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
 }
 
 struct smp_ops smp_ops = {
-       .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
-       .smp_prepare_cpus = native_smp_prepare_cpus,
-       .smp_cpus_done = native_smp_cpus_done,
+       .smp_prepare_boot_cpu   = native_smp_prepare_boot_cpu,
+       .smp_prepare_cpus       = native_smp_prepare_cpus,
+       .smp_cpus_done          = native_smp_cpus_done,
 
-       .smp_send_stop = native_smp_send_stop,
-       .smp_send_reschedule = native_smp_send_reschedule,
+       .smp_send_stop          = native_smp_send_stop,
+       .smp_send_reschedule    = native_smp_send_reschedule,
 
-       .cpu_up = native_cpu_up,
-       .cpu_die = native_cpu_die,
-       .cpu_disable = native_cpu_disable,
-       .play_dead = native_play_dead,
+       .cpu_up                 = native_cpu_up,
+       .cpu_die                = native_cpu_die,
+       .cpu_disable            = native_cpu_disable,
+       .play_dead              = native_play_dead,
 
-       .send_call_func_ipi = native_send_call_func_ipi,
+       .send_call_func_ipi     = native_send_call_func_ipi,
        .send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
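
The smp_ops table exists so that non-native ports can swap in their own SMP
primitives at boot instead of patching every call site; the alignment-only
churn above just makes the table easier to scan. A hedged sketch of how a
paravirtualized guest might override one hook (all names below are
hypothetical, not taken from this diff):

	/* Hypothetical paravirt port: replace the APIC-IPI reschedule
	 * kick with a hypervisor notification. */
	static void pv_send_reschedule(int cpu)
	{
		/* hypercall or event-channel kick would go here */
	}

	static void __init pv_smp_init(void)
	{
		smp_ops.smp_send_reschedule = pv_send_reschedule;
	}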