[POWERPC] fsl_soc: rtc-ds1307 support
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e28a139..d30f08f 100644
@@ -17,7 +17,6 @@
 
 #undef DEBUG
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -31,32 +30,35 @@
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/topology.h>
 
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
 #include <asm/cputable.h>
 #include <asm/system.h>
 #include <asm/mpic.h>
-#include <asm/systemcfg.h>
+#include <asm/vdso_datapage.h>
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #endif
 
-int smp_hw_index[NR_CPUS];
-struct thread_info *secondary_ti;
-
 #ifdef DEBUG
+#include <asm/udbg.h>
 #define DBG(fmt...) udbg_printf(fmt)
 #else
 #define DBG(fmt...)
 #endif
 
+int smp_hw_index[NR_CPUS];
+struct thread_info *secondary_ti;
+
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
+EXPORT_SYMBOL(cpu_sibling_map);
 
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
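
Note: cpu_sibling_map is now exported, so modular code can consult SMT topology directly. A minimal sketch of what that enables, using the cpumask API of this tree (cpumask_t arrays, for_each_cpu_mask); count_smt_siblings() is a hypothetical helper, not part of this diff:

    #include <linux/cpumask.h>

    /* Count the SMT siblings of 'cpu'; the mask includes 'cpu' itself. */
    static int count_smt_siblings(int cpu)
    {
            int sibling, n = 0;

            for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
                    n++;
            return n;
    }
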
@@ -74,28 +76,7 @@ void smp_call_function_interrupt(void);
 
 int smt_enabled_at_boot = 1;
 
-#ifdef CONFIG_MPIC
-int __init smp_mpic_probe(void)
-{
-       int nr_cpus;
-
-       DBG("smp_mpic_probe()...\n");
-
-       nr_cpus = cpus_weight(cpu_possible_map);
-
-       DBG("nr_cpus: %d\n", nr_cpus);
-
-       if (nr_cpus > 1)
-               mpic_request_ipis();
-
-       return nr_cpus;
-}
-
-void __devinit smp_mpic_setup_cpu(int cpu)
-{
-       mpic_setup_this_cpu();
-}
-#endif /* CONFIG_MPIC */
+static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
 
 #ifdef CONFIG_PPC64
 void __devinit smp_generic_kick_cpu(int nr)
@@ -112,7 +93,7 @@ void __devinit smp_generic_kick_cpu(int nr)
 }
 #endif
 
-void smp_message_recv(int msg, struct pt_regs *regs)
+void smp_message_recv(int msg)
 {
        switch(msg) {
        case PPC_MSG_CALL_FUNCTION:
@@ -122,11 +103,16 @@ void smp_message_recv(int msg, struct pt_regs *regs)
                /* XXX Do we have to do this? */
                set_need_resched();
                break;
-#ifdef CONFIG_DEBUGGER
        case PPC_MSG_DEBUGGER_BREAK:
-               debugger_ipi(regs);
+               if (crash_ipi_function_ptr) {
+                       crash_ipi_function_ptr(get_irq_regs());
+                       break;
+               }
+#ifdef CONFIG_DEBUGGER
+               debugger_ipi(get_irq_regs());
                break;
-#endif
+#endif /* CONFIG_DEBUGGER */
+               /* FALLTHROUGH */
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
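
The dropped pt_regs argument follows the tree-wide switch to get_irq_regs(): the interrupt entry path now stashes the register pointer per CPU, so IPI demultiplexing no longer has to thread it through. A sketch of a caller under the new convention; demux_ipi() and the dev_id encoding are illustrative assumptions, not code from this diff:

    #include <linux/interrupt.h>

    static irqreturn_t demux_ipi(int irq, void *dev_id)
    {
            int msg = (int)(long)dev_id;    /* one of the PPC_MSG_* values */

            /* regs are implicit: smp_message_recv() uses get_irq_regs() */
            smp_message_recv(msg);
            return IRQ_HANDLED;
    }

Also note the ordering in the hunk above: a registered crash callback takes precedence over debugger_ipi(), and with neither CONFIG_KEXEC callback nor CONFIG_DEBUGGER compiled in, the case deliberately falls through to the "unknown msg" report.
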
@@ -136,13 +122,26 @@ void smp_message_recv(int msg, struct pt_regs *regs)
 
 void smp_send_reschedule(int cpu)
 {
-       smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
+       if (likely(smp_ops))
+               smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
 
 #ifdef CONFIG_DEBUGGER
 void smp_send_debugger_break(int cpu)
 {
-       smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+       if (likely(smp_ops))
+               smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+}
+#endif
+
+#ifdef CONFIG_KEXEC
+void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
+{
+       crash_ipi_function_ptr = crash_ipi_callback;
+       if (crash_ipi_callback && smp_ops) {
+               mb();
+               smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
+       }
 }
 #endif
 
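The bare mb() is what lets crash_send_ipi() work without a lock: it orders the store to crash_ipi_function_ptr before the IPIs are sent, so any CPU taking PPC_MSG_DEBUGGER_BREAK afterwards sees the callback in smp_message_recv(). A sketch of the intended kexec-crash usage; the callback body is an assumption (the real one records register state for kdump):

    static void crash_ipi_handler(struct pt_regs *regs)
    {
            /* the real callback saves this CPU's registers for the
             * kdump kernel; here we just park the CPU out of the way */
            while (1)
                    cpu_relax();
    }

    static void halt_other_cpus_for_crash(void)
    {
            crash_send_ipi(crash_ipi_handler);  /* mb() + IPI all-but-self */
    }
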
@@ -153,11 +152,6 @@ static void stop_this_cpu(void *dummy)
                ;
 }
 
-void smp_send_stop(void)
-{
-       smp_call_function(stop_this_cpu, NULL, 1, 0);
-}
-
 /*
  * Structure and data for smp_call_function(). This is designed to minimise
  * static memory requirements. It also looks cleaner.
@@ -177,10 +171,10 @@ static struct call_data_struct {
 #define SMP_CALL_TIMEOUT       8
 
 /*
- * This function sends a 'generic call function' IPI to all other CPUs
- * in the system.
+ * These functions send a 'generic call function' IPI to other online
+ * CPUs in the system.
  *
- * [SUMMARY] Run a function on all other CPUs.
+ * [SUMMARY] Run a function on other CPUs.
  * <func> The function to run. This must be fast and non-blocking.
  * <info> An arbitrary pointer to pass to the function.
  * <nonatomic> currently unused.
@@ -191,15 +185,16 @@ static struct call_data_struct {
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-                      int wait)
-{ 
+int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
+                       int wait, cpumask_t map)
+{
        struct call_data_struct data;
-       int ret = -1, cpus;
+       int ret = -1, num_cpus;
+       int cpu;
        u64 timeout;
 
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
+       if (unlikely(smp_ops == NULL))
+               return ret;
 
        data.func = func;
        data.info = info;
@@ -209,48 +204,55 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                atomic_set(&data.finished, 0);
 
        spin_lock(&call_lock);
-       /* Must grab online cpu count with preempt disabled, otherwise
-        * it can change. */
-       cpus = num_online_cpus() - 1;
-       if (!cpus) {
-               ret = 0;
-               goto out;
-       }
+
+       /* remove 'self' from the map */
+       if (cpu_isset(smp_processor_id(), map))
+               cpu_clear(smp_processor_id(), map);
+
+       /* sanity check the map, remove any non-online processors. */
+       cpus_and(map, map, cpu_online_map);
+
+       num_cpus = cpus_weight(map);
+       if (!num_cpus)
+               goto done;
 
        call_data = &data;
        smp_wmb();
-       /* Send a message to all other CPUs and wait for them to respond */
-       smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
+       /* Send a message to all CPUs in the map */
+       for_each_cpu_mask(cpu, map)
+               smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 
        timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
 
-       /* Wait for response */
-       while (atomic_read(&data.started) != cpus) {
+       /* Wait for indication that they have received the message */
+       while (atomic_read(&data.started) != num_cpus) {
                HMT_low();
                if (get_tb() >= timeout) {
                        printk("smp_call_function on cpu %d: other cpus not "
-                              "responding (%d)\n", smp_processor_id(),
-                              atomic_read(&data.started));
+                               "responding (%d)\n", smp_processor_id(),
+                               atomic_read(&data.started));
                        debugger(NULL);
                        goto out;
                }
        }
 
+       /* optionally wait for the CPUs to complete */
        if (wait) {
-               while (atomic_read(&data.finished) != cpus) {
+               while (atomic_read(&data.finished) != num_cpus) {
                        HMT_low();
                        if (get_tb() >= timeout) {
                                printk("smp_call_function on cpu %d: other "
-                                      "cpus not finishing (%d/%d)\n",
-                                      smp_processor_id(),
-                                      atomic_read(&data.finished),
-                                      atomic_read(&data.started));
+                                       "cpus not finishing (%d/%d)\n",
+                                       smp_processor_id(),
+                                       atomic_read(&data.finished),
+                                       atomic_read(&data.started));
                                debugger(NULL);
                                goto out;
                        }
                }
        }
 
+ done:
        ret = 0;
 
  out:
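
To make the new contract concrete: smp_call_function_map() sanitizes the mask itself, clearing the calling CPU and any offline CPUs, so an over-broad mask is safe, and it now returns -1 quietly when smp_ops is NULL (UP-on-SMP-hardware kernels) instead of oopsing. A hypothetical caller; flush_remote_state() and the CPU numbers are made up for illustration:

    static void flush_remote_state(void *info)
    {
            /* runs on each targeted CPU, from the IPI handler */
    }

    static int flush_two_cpus(void)
    {
            cpumask_t map = CPU_MASK_NONE;

            cpu_set(2, map);
            cpu_set(3, map);
            /* nonatomic is unused; wait=1 blocks until both CPUs finish */
            return smp_call_function_map(flush_remote_state, NULL, 0, 1, map);
    }
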
@@ -260,8 +262,52 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
        return ret;
 }
 
+static int __smp_call_function(void (*func)(void *info), void *info,
+                              int nonatomic, int wait)
+{
+       return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map);
+}
+
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+                       int wait)
+{
+       /* Can deadlock when called with interrupts disabled */
+       WARN_ON(irqs_disabled());
+
+       return __smp_call_function(func, info, nonatomic, wait);
+}
 EXPORT_SYMBOL(smp_call_function);
 
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int nonatomic,
+                       int wait)
+{
+       cpumask_t map = CPU_MASK_NONE;
+       int ret = 0;
+
+       /* Can deadlock when called with interrupts disabled */
+       WARN_ON(irqs_disabled());
+
+       if (!cpu_online(cpu))
+               return -EINVAL;
+
+       cpu_set(cpu, map);
+       if (cpu != get_cpu())
+               ret = smp_call_function_map(func,info,nonatomic,wait,map);
+       else {
+               local_irq_disable();
+               func(info);
+               local_irq_enable();
+       }
+       put_cpu();
+       return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
+
+void smp_send_stop(void)
+{
+       __smp_call_function(stop_this_cpu, NULL, 1, 0);
+}
+
 void smp_call_function_interrupt(void)
 {
        void (*func) (void *info);
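
A sketch of using the new smp_call_function_single(): if cpu is the caller's own CPU, func() runs locally with interrupts disabled rather than via IPI, and the get_cpu()/put_cpu() pair pins the caller while it decides. read_pir() is a hypothetical example, not from this diff:

    #include <asm/reg.h>

    static void read_pir(void *info)
    {
            *(unsigned long *)info = mfspr(SPRN_PIR);   /* processor ID reg */
    }

    static void show_pir(int cpu)
    {
            unsigned long pir = 0;

            if (smp_call_function_single(cpu, read_pir, &pir, 0, 1) == 0)
                    printk("cpu %d: PIR=%lu\n", cpu, pir);
    }
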
@@ -318,8 +364,8 @@ static void __init smp_create_idle(unsigned int cpu)
 #ifdef CONFIG_PPC64
        paca[cpu].__current = p;
 #endif
-       current_set[cpu] = p->thread_info;
-       p->thread_info->cpu = cpu;
+       current_set[cpu] = task_thread_info(p);
+       task_thread_info(p)->cpu = cpu;
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
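
This hunk, and the smp_prepare_boot_cpu one below, are part of the tree-wide task_thread_info() conversion: the accessor hides how thread_info is reached from a task_struct (roughly, in kernels of this vintage, something like

    #define task_thread_info(task)  ((struct thread_info *)(task)->stack)

in <linux/sched.h>), so call sites keep working when the underlying layout changes.
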
@@ -338,11 +384,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;
 
-       max_cpus = smp_ops->probe();
+       if (smp_ops)
+               max_cpus = smp_ops->probe();
+       else
+               max_cpus = 1;
  
        smp_space_timers(max_cpus);
 
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
 }
@@ -355,7 +404,7 @@ void __devinit smp_prepare_boot_cpu(void)
 #ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
 #endif
-       current_set[boot_cpuid] = current->thread_info;
+       current_set[boot_cpuid] = task_thread_info(current);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -371,7 +420,7 @@ int generic_cpu_disable(void)
 
        cpu_clear(cpu, cpu_online_map);
 #ifdef CONFIG_PPC64
-       _systemcfg->processorCount--;
+       vdso_data->processorCount--;
        fixup_irqs(cpu_online_map);
 #endif
        return 0;
@@ -423,10 +472,6 @@ void generic_mach_cpu_die(void)
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
-
-#ifdef CONFIG_PPC64
-       flush_tlb_pending();
-#endif
        cpu_set(cpu, cpu_online_map);
        local_irq_enable();
 }
@@ -434,13 +479,13 @@ void generic_mach_cpu_die(void)
 
 static int __devinit cpu_enable(unsigned int cpu)
 {
-       if (smp_ops->cpu_enable)
+       if (smp_ops && smp_ops->cpu_enable)
                return smp_ops->cpu_enable(cpu);
 
        return -ENOSYS;
 }
 
-int __devinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu)
 {
        int c;
 
@@ -448,13 +493,10 @@ int __devinit __cpu_up(unsigned int cpu)
        if (!cpu_enable(cpu))
                return 0;
 
-       if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
+       if (smp_ops == NULL ||
+           (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;
 
-#ifdef CONFIG_PPC64
-       paca[cpu].default_decr = tb_ticks_per_jiffy;
-#endif
-
        /* Make sure callin-map entry is 0 (can be leftover a CPU
         * hotplug
         */
@@ -476,7 +518,7 @@ int __devinit __cpu_up(unsigned int cpu)
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
-               for (c = 5000; c && !cpu_callin_map[cpu]; c--)
+               for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
 #ifdef CONFIG_HOTPLUG_CPU
        else
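
For scale: the boot-time spin above now allows 50000 iterations of udelay(100), i.e. 50000 * 100us = 5s for a secondary CPU to set its cpu_callin_map slot, up from the previous 0.5s budget.
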
@@ -524,6 +566,11 @@ int __devinit start_secondary(void *unused)
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();
 
+       if (system_state > SYSTEM_BOOTING)
+               snapshot_timebase();
+
+       secondary_cpu_time_init();
+
        spin_lock(&call_lock);
        cpu_set(cpu, cpu_online_map);
        spin_unlock(&call_lock);
@@ -550,9 +597,14 @@ void __init smp_cpus_done(unsigned int max_cpus)
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
        
-       smp_ops->setup_cpu(boot_cpuid);
+       if (smp_ops)
+               smp_ops->setup_cpu(boot_cpuid);
 
        set_cpus_allowed(current, old_mask);
+
+       snapshot_timebases();
+
+       dump_numa_cpu_topology();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU