Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 16c52aa..e6faa33 100644
@@ -1,7 +1,7 @@
 /*
  *     Intel SMP support routines.
  *
- *     (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ *     (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
  *     (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
  *      (c) 2002,2003 Andi Kleen, SuSE Labs.
  *
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
-#ifdef CONFIG_X86_32
-#include <mach_apic.h>
 #include <mach_ipi.h>
-#else
-#include <asm/mach_apic.h>
-#endif
+#include <mach_apic.h>
 /*
  *     Some notes on x86 processor bugs affecting SMP operation:
  *
@@ -122,147 +118,33 @@ static void native_smp_send_reschedule(int cpu)
                WARN_ON(1);
                return;
        }
-       send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+       send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
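
The reschedule path above switches from cpumask_of_cpu(cpu), which produces a full cpumask_t by value, to cpumask_of(cpu), which produces a const struct cpumask * and therefore copies nothing onto the stack when NR_CPUS is large. A minimal sketch of the new calling convention in this file's context (the helper below is hypothetical and not part of the patch):

/*
 * Hypothetical helper (illustration only): cpumask_of(cpu) names exactly
 * one CPU via a pointer to a static bitmap, so the IPI layer never sees
 * an NR_CPUS-bit bitmap copied by value.
 */
static void send_reschedule_if_online(int cpu)
{
	if (cpu_online(cpu))
		send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
}
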
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-       void (*func) (void *info);
-       void *info;
-       atomic_t started;
-       atomic_t finished;
-       int wait;
-};
-
-void lock_ipi_call_lock(void)
+void native_send_call_func_single_ipi(int cpu)
 {
-       spin_lock_irq(&call_lock);
+       send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
-void unlock_ipi_call_lock(void)
+void native_send_call_func_ipi(const struct cpumask *mask)
 {
-       spin_unlock_irq(&call_lock);
-}
-
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
-                               int nonatomic, int wait)
-{
-       struct call_data_struct data;
-       int cpus = num_online_cpus() - 1;
+       cpumask_var_t allbutself;
 
-       if (!cpus)
+       if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
+               send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
                return;
-
-       data.func = func;
-       data.info = info;
-       atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
-               atomic_set(&data.finished, 0);
-
-       call_data = &data;
-       mb();
-
-       /* Send a message to all other CPUs and wait for them to respond */
-       send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-       /* Wait for response */
-       while (atomic_read(&data.started) != cpus)
-               cpu_relax();
-
-       if (wait)
-               while (atomic_read(&data.finished) != cpus)
-                       cpu_relax();
-}
-
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
-  * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
-                             void (*func)(void *), void *info,
-                             int wait)
-{
-       struct call_data_struct data;
-       cpumask_t allbutself;
-       int cpus;
-
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
-
-       /* Holding any lock stops cpus from going down. */
-       spin_lock(&call_lock);
-
-       allbutself = cpu_online_map;
-       cpu_clear(smp_processor_id(), allbutself);
-
-       cpus_and(mask, mask, allbutself);
-       cpus = cpus_weight(mask);
-
-       if (!cpus) {
-               spin_unlock(&call_lock);
-               return 0;
        }
 
-       data.func = func;
-       data.info = info;
-       atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
-               atomic_set(&data.finished, 0);
-
-       call_data = &data;
-       wmb();
+       cpumask_copy(allbutself, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), allbutself);
 
-       /* Send a message to other CPUs */
-       if (cpus_equal(mask, allbutself))
+       if (cpumask_equal(mask, allbutself) &&
+           cpumask_equal(cpu_online_mask, cpu_callout_mask))
                send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 
-       /* Wait for response */
-       while (atomic_read(&data.started) != cpus)
-               cpu_relax();
-
-       if (wait)
-               while (atomic_read(&data.finished) != cpus)
-                       cpu_relax();
-       spin_unlock(&call_lock);
-
-       return 0;
-}
-
-static void stop_this_cpu(void *dummy)
-{
-       local_irq_disable();
-       /*
-        * Remove this CPU:
-        */
-       cpu_clear(smp_processor_id(), cpu_online_map);
-       disable_local_APIC();
-       if (hlt_works(smp_processor_id()))
-               for (;;) halt();
-       for (;;);
+       free_cpumask_var(allbutself);
 }
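
native_send_call_func_ipi() now builds its scratch mask as a cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK the bitmap lives on the heap, so the GFP_ATOMIC allocation can fail and the code falls back to IPIing the given mask directly. A sketch of the same allocate/copy/clear/free lifecycle in isolation (hypothetical helper; the caller is assumed to hold preemption off so smp_processor_id() is stable):

/*
 * Hypothetical helper (illustration only): the usual lifecycle of a
 * temporary cpumask under the pointer-based API.
 */
static void for_each_other_online_cpu(void (*fn)(int cpu))
{
	cpumask_var_t tmp;
	int cpu;

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return;		/* a real caller needs a fallback here */

	cpumask_copy(tmp, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), tmp);

	for_each_cpu(cpu, tmp)
		fn(cpu);

	free_cpumask_var(tmp);
}
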
 
 /*
@@ -271,18 +153,13 @@ static void stop_this_cpu(void *dummy)
 
 static void native_smp_send_stop(void)
 {
-       int nolock;
        unsigned long flags;
 
        if (reboot_force)
                return;
 
-       /* Don't deadlock on the call lock in panic */
-       nolock = !spin_trylock(&call_lock);
+       smp_call_function(stop_this_cpu, NULL, 0);
        local_irq_save(flags);
-       __smp_call_function(stop_this_cpu, NULL, 0, 0);
-       if (!nolock)
-               spin_unlock(&call_lock);
        disable_local_APIC();
        local_irq_restore(flags);
 }
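
With the generic helpers in kernel/smp.c, the stop path simply calls smp_call_function(stop_this_cpu, NULL, 0); the old four-argument form and its 'nonatomic' flag are gone, and wait is 0 here because the stopped CPUs never return. For reference, a hedged sketch of an ordinary caller of the three-argument API (the example function and message are made up):

static void say_hello(void *info)
{
	pr_info("hello from CPU %d\n", smp_processor_id());
}

/*
 * Runs say_hello() on every other online CPU and waits (wait=1) until
 * all of them have finished.  Must not be called with interrupts
 * disabled or from interrupt context.
 */
static void greet_other_cpus(void)
{
	smp_call_function(say_hello, NULL, 1);
}
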
@@ -295,53 +172,41 @@ static void native_smp_send_stop(void)
 void smp_reschedule_interrupt(struct pt_regs *regs)
 {
        ack_APIC_irq();
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_resched_count++;
-#else
-       add_pda(irq_resched_count, 1);
-#endif
+       inc_irq_stat(irq_resched_count);
 }
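
inc_irq_stat() folds the 32-bit per-CPU irq_stat bump and the 64-bit PDA bump behind one macro, which is what the removed #ifdef pair did by hand. Roughly, and only as an illustration of the idea rather than a verbatim copy of the asm/hardirq headers of this era:

#ifdef CONFIG_X86_32
# define inc_irq_stat(member)	(__get_cpu_var(irq_stat).member++)
#else
# define inc_irq_stat(member)	add_pda(member, 1)
#endif
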
 
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-       void (*func) (void *info) = call_data->func;
-       void *info = call_data->info;
-       int wait = call_data->wait;
-
        ack_APIC_irq();
-       /*
-        * Notify initiating CPU that I've grabbed the data and am
-        * about to execute the function
-        */
-       mb();
-       atomic_inc(&call_data->started);
-       /*
-        * At this point the info structure may be out of scope unless wait==1
-        */
        irq_enter();
-       (*func)(info);
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_call_count++;
-#else
-       add_pda(irq_call_count, 1);
-#endif
+       generic_smp_call_function_interrupt();
+       inc_irq_stat(irq_call_count);
        irq_exit();
+}
 
-       if (wait) {
-               mb();
-               atomic_inc(&call_data->finished);
-       }
+void smp_call_function_single_interrupt(struct pt_regs *regs)
+{
+       ack_APIC_irq();
+       irq_enter();
+       generic_smp_call_function_single_interrupt();
+       inc_irq_stat(irq_call_count);
+       irq_exit();
 }
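
Both interrupt handlers now only ack the APIC and defer to the generic code; the queueing side lives behind smp_call_function_single() and friends. A small sketch of the initiator that pairs with smp_call_function_single_interrupt() above (helper names invented for the example):

static void record_cpu(void *info)
{
	*(int *)info = smp_processor_id();
}

/*
 * Asks @cpu to run record_cpu() via CALL_FUNCTION_SINGLE_VECTOR and
 * spins (wait=1) until it has done so.  Returns -1 if the call failed.
 */
static int which_cpu_answered(int cpu)
{
	int answer = -1;

	if (smp_call_function_single(cpu, record_cpu, &answer, 1))
		return -1;
	return answer;
}
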
 
 struct smp_ops smp_ops = {
        .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
        .smp_prepare_cpus = native_smp_prepare_cpus,
-       .cpu_up = native_cpu_up,
        .smp_cpus_done = native_smp_cpus_done,
 
        .smp_send_stop = native_smp_send_stop,
        .smp_send_reschedule = native_smp_send_reschedule,
-       .smp_call_function_mask = native_smp_call_function_mask,
+
+       .cpu_up = native_cpu_up,
+       .cpu_die = native_cpu_die,
+       .cpu_disable = native_cpu_disable,
+       .play_dead = native_play_dead,
+
+       .send_call_func_ipi = native_send_call_func_ipi,
+       .send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
-
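
smp_ops is exported so that a paravirtualized backend can substitute its own hooks; after this patch the cross-call IPIs are two separate hooks rather than a single smp_call_function_mask. A hedged sketch of a partial override, assuming the field types implied by the initializer above (the pv_* functions are hypothetical):

static void pv_send_reschedule(int cpu) { /* poke the hypervisor */ }
static void pv_send_call_func_ipi(const struct cpumask *mask) { }
static void pv_send_call_func_single_ipi(int cpu) { }

/* Replace only IPI delivery; keep the native CPU bring-up hooks. */
static void __init pv_setup_smp_ops(void)
{
	smp_ops.smp_send_reschedule = pv_send_reschedule;
	smp_ops.send_call_func_ipi = pv_send_call_func_ipi;
	smp_ops.send_call_func_single_ipi = pv_send_call_func_single_ipi;
}
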