smp_call_function: get rid of the unused nonatomic/retry argument
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8aca4b1..c099d96 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1,6 +1,6 @@
 /* smp.c: Sparc64 SMP support.
  *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/module.h>
@@ -10,7 +10,6 @@
 #include <linux/pagemap.h>
 #include <linux/threads.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/delay.h>
@@ -21,7 +20,7 @@
 #include <linux/cache.h>
 #include <linux/jiffies.h>
 #include <linux/profile.h>
-#include <linux/bootmem.h>
+#include <linux/lmb.h>
 
 #include <asm/head.h>
 #include <asm/ptrace.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/cpudata.h>
+#include <asm/hvtramp.h>
+#include <asm/io.h>
+#include <asm/timer.h>
 
 #include <asm/irq.h>
+#include <asm/irq_regs.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/oplib.h>
 #include <asm/uaccess.h>
-#include <asm/timer.h>
 #include <asm/starfire.h>
 #include <asm/tlb.h>
+#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/mdesc.h>
+#include <asm/ldc.h>
+#include <asm/hypervisor.h>
 
-extern int linux_num_cpus;
-extern void calibrate_delay(void);
-
-/* Please don't make this stuff initdata!!!  --DaveM */
-static unsigned char boot_cpu_id;
+int sparc64_multi_core __read_mostly;
 
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
-cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+       { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+
+EXPORT_SYMBOL(cpu_possible_map);
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+EXPORT_SYMBOL(cpu_core_map);
+
 static cpumask_t smp_commenced_mask;
-static cpumask_t cpu_callout_map;
 
 void smp_info(struct seq_file *m)
 {
        int i;
        
        seq_printf(m, "State:\n");
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_online(i))
-                       seq_printf(m,
-                                  "CPU%d:\t\tonline\n", i);
-       }
+       for_each_online_cpu(i)
+               seq_printf(m, "CPU%d:\t\tonline\n", i);
 }
 
 void smp_bogo(struct seq_file *m)
 {
        int i;
        
-       for (i = 0; i < NR_CPUS; i++)
-               if (cpu_online(i))
-                       seq_printf(m,
-                                  "Cpu%dBogo\t: %lu.%02lu\n"
-                                  "Cpu%dClkTck\t: %016lx\n",
-                                  i, cpu_data(i).udelay_val / (500000/HZ),
-                                  (cpu_data(i).udelay_val / (5000/HZ)) % 100,
-                                  i, cpu_data(i).clock_tick);
+       for_each_online_cpu(i)
+               seq_printf(m,
+                          "Cpu%dClkTck\t: %016lx\n",
+                          i, cpu_data(i).clock_tick);
 }
 
-void __init smp_store_cpu_info(int id)
-{
-       int cpu_node;
-
-       /* multiplier and counter set by
-          smp_setup_percpu_timer()  */
-       cpu_data(id).udelay_val                 = loops_per_jiffy;
-
-       cpu_find_by_mid(id, &cpu_node);
-       cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
-                                                    "clock-frequency", 0);
-
-       cpu_data(id).pgcache_size               = 0;
-       cpu_data(id).pte_cache[0]               = NULL;
-       cpu_data(id).pte_cache[1]               = NULL;
-       cpu_data(id).pgd_cache                  = NULL;
-       cpu_data(id).idle_volume                = 1;
-
-       cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
-                                                     16 * 1024);
-       cpu_data(id).dcache_line_size =
-               prom_getintdefault(cpu_node, "dcache-line-size", 32);
-       cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
-                                                     16 * 1024);
-       cpu_data(id).icache_line_size =
-               prom_getintdefault(cpu_node, "icache-line-size", 32);
-       cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
-                                                     4 * 1024 * 1024);
-       cpu_data(id).ecache_line_size =
-               prom_getintdefault(cpu_node, "ecache-line-size", 64);
-       printk("CPU[%d]: Caches "
-              "D[sz(%d):line_sz(%d)] "
-              "I[sz(%d):line_sz(%d)] "
-              "E[sz(%d):line_sz(%d)]\n",
-              id,
-              cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
-              cpu_data(id).icache_size, cpu_data(id).icache_line_size,
-              cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
-}
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
 
-static void smp_setup_percpu_timer(void);
+extern void setup_sparc64_timer(void);
 
 static volatile unsigned long callin_flag = 0;
 
-extern void inherit_locked_prom_mappings(int save_p);
-
-static inline void cpu_setup_percpu_base(unsigned long cpu_id)
-{
-       __asm__ __volatile__("mov       %0, %%g5\n\t"
-                            "stxa      %0, [%1] %2\n\t"
-                            "membar    #Sync"
-                            : /* no outputs */
-                            : "r" (__per_cpu_offset(cpu_id)),
-                              "r" (TSB_REG), "i" (ASI_IMMU));
-}
-
-void __init smp_callin(void)
+void __cpuinit smp_callin(void)
 {
        int cpuid = hard_smp_processor_id();
 
-       inherit_locked_prom_mappings(0);
+       __local_per_cpu_offset = __per_cpu_offset(cpuid);
 
-       __flush_tlb_all();
+       if (tlb_type == hypervisor)
+               sun4v_ktsb_register();
 
-       cpu_setup_percpu_base(cpuid);
+       __flush_tlb_all();
 
-       smp_setup_percpu_timer();
+       setup_sparc64_timer();
 
        if (cheetah_pcache_forced_on)
                cheetah_enable_pcache();
 
        local_irq_enable();
 
-       calibrate_delay();
-       smp_store_cpu_info(cpuid);
        callin_flag = 1;
        __asm__ __volatile__("membar #Sync\n\t"
                             "flush  %%g6" : : : "memory");
@@ -167,7 +120,9 @@ void __init smp_callin(void)
        while (!cpu_isset(cpuid, smp_commenced_mask))
                rmb();
 
+       spin_lock(&call_lock);
        cpu_set(cpuid, cpu_online_map);
+       spin_unlock(&call_lock);
 
        /* idle thread is expected to have preempt disabled */
        preempt_disable();
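The call_lock critical section added above pairs with sparc64_smp_call_function_mask() later in this patch, which sizes its target set under the same lock; a minimal sketch of the invariant, using only names from this file:

    /* Writer (smp_callin): a new cpu becomes visible to cross calls
     * only while no call is being dispatched.
     */
    spin_lock(&call_lock);
    cpu_set(cpuid, cpu_online_map);
    spin_unlock(&call_lock);

    /* Reader (sparc64_smp_call_function_mask): holding call_lock keeps
     * cpus_weight(mask) stable until all responses are counted.
     */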
@@ -179,8 +134,6 @@ void cpu_panic(void)
        panic("SMP bolixed\n");
 }
 
-static unsigned long current_tick_offset __read_mostly;
-
 /* This tick register synchronization scheme is taken entirely from
  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
  *
@@ -263,7 +216,7 @@ void smp_synchronize_tick_client(void)
                                } else
                                        adj = -delta;
 
-                               tick_ops->add_tick(adj, current_tick_offset);
+                               tick_ops->add_tick(adj);
                        }
 #if DEBUG_TICK_SYNC
                        t[i].rt = rt;
@@ -281,8 +234,9 @@ void smp_synchronize_tick_client(void)
                       t[i].rt, t[i].master, t[i].diff, t[i].lat);
 #endif
 
-       printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
-              "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
+       printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
+              "(last diff %ld cycles, maxerr %lu cycles)\n",
+              smp_processor_id(), delta, rt);
 }
 
 static void smp_start_sync_tick_client(int cpu);
@@ -317,6 +271,68 @@ static void smp_synchronize_one_tick(int cpu)
        spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
+#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
+/* XXX Put this in some common place. XXX */
+static unsigned long kimage_addr_to_ra(void *p)
+{
+       unsigned long val = (unsigned long) p;
+
+       return kern_base + (val - KERNBASE);
+}
+
+static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
+{
+       extern unsigned long sparc64_ttable_tl0;
+       extern unsigned long kern_locked_tte_data;
+       struct hvtramp_descr *hdesc;
+       unsigned long trampoline_ra;
+       struct trap_per_cpu *tb;
+       u64 tte_vaddr, tte_data;
+       unsigned long hv_err;
+       int i;
+
+       hdesc = kzalloc(sizeof(*hdesc) +
+                       (sizeof(struct hvtramp_mapping) *
+                        num_kernel_image_mappings - 1),
+                       GFP_KERNEL);
+       if (!hdesc) {
+               printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
+                      "hvtramp_descr.\n");
+               return;
+       }
+
+       hdesc->cpu = cpu;
+       hdesc->num_mappings = num_kernel_image_mappings;
+
+       tb = &trap_block[cpu];
+       tb->hdesc = hdesc;
+
+       hdesc->fault_info_va = (unsigned long) &tb->fault_info;
+       hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
+
+       hdesc->thread_reg = thread_reg;
+
+       tte_vaddr = (unsigned long) KERNBASE;
+       tte_data = kern_locked_tte_data;
+
+       for (i = 0; i < hdesc->num_mappings; i++) {
+               hdesc->maps[i].vaddr = tte_vaddr;
+               hdesc->maps[i].tte   = tte_data;
+               tte_vaddr += 0x400000;
+               tte_data  += 0x400000;
+       }
+
+       trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
+
+       hv_err = sun4v_cpu_start(cpu, trampoline_ra,
+                                kimage_addr_to_ra(&sparc64_ttable_tl0),
+                                __pa(hdesc));
+       if (hv_err)
+               printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
+                      "gives error %lu\n", hv_err);
+}
+#endif
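For orientation, the descriptor built by ldom_startcpu_cpuid() has roughly this shape; this is a sketch of asm/hvtramp.h from the same era, reproduced from memory rather than taken from this patch, so treat the exact field order as an assumption:

    struct hvtramp_mapping {
            __u64   vaddr;                  /* kernel image virtual address */
            __u64   tte;                    /* locked TTE that maps it */
    };

    struct hvtramp_descr {
            __u32   cpu;                    /* cpu to start */
            __u32   num_mappings;           /* entries in maps[] */
            __u64   fault_info_va;          /* &trap_block[cpu].fault_info */
            __u64   fault_info_pa;
            __u64   thread_reg;             /* thread_info of the idle task */
            struct hvtramp_mapping maps[1]; /* really num_mappings entries */
    };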
+
 extern unsigned long sparc64_cpu_startup;
 
 /* The OBP cpu startup callback truncates the 3rd arg cookie to
@@ -327,35 +343,53 @@ static struct thread_info *cpu_new_thread = NULL;
 
 static int __devinit smp_boot_one_cpu(unsigned int cpu)
 {
+       struct trap_per_cpu *tb = &trap_block[cpu];
        unsigned long entry =
                (unsigned long)(&sparc64_cpu_startup);
        unsigned long cookie =
                (unsigned long)(&cpu_new_thread);
        struct task_struct *p;
-       int timeout, ret, cpu_node;
+       int timeout, ret;
 
        p = fork_idle(cpu);
+       if (IS_ERR(p))
+               return PTR_ERR(p);
        callin_flag = 0;
-       cpu_new_thread = p->thread_info;
-       cpu_set(cpu, cpu_callout_map);
+       cpu_new_thread = task_thread_info(p);
+
+       if (tlb_type == hypervisor) {
+#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
+               if (ldom_domaining_enabled)
+                       ldom_startcpu_cpuid(cpu,
+                                           (unsigned long) cpu_new_thread);
+               else
+#endif
+                       prom_startcpu_cpuid(cpu, entry, cookie);
+       } else {
+               struct device_node *dp = of_find_node_by_cpuid(cpu);
 
-       cpu_find_by_mid(cpu, &cpu_node);
-       prom_startcpu(cpu_node, entry, cookie);
+               prom_startcpu(dp->node, entry, cookie);
+       }
 
-       for (timeout = 0; timeout < 5000000; timeout++) {
+       for (timeout = 0; timeout < 50000; timeout++) {
                if (callin_flag)
                        break;
                udelay(100);
        }
+
        if (callin_flag) {
                ret = 0;
        } else {
                printk("Processor %d is stuck.\n", cpu);
-               cpu_clear(cpu, cpu_callout_map);
                ret = -ENODEV;
        }
        cpu_new_thread = NULL;
 
+       if (tb->hdesc) {
+               kfree(tb->hdesc);
+               tb->hdesc = NULL;
+       }
+
        return ret;
 }
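Note the tightened handshake bound: the loop polls in udelay(100) steps, so 50000 iterations cap the wait at about five seconds, where the old 5000000 bound allowed more than eight minutes:

    50000 iterations * 100 us = 5,000,000 us = 5 s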
 
@@ -425,7 +459,7 @@ again:
        }
 }
 
-static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
        u64 pstate;
        int i;
@@ -441,8 +475,8 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
  */
 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
-       u64 pstate, ver;
-       int nack_busy_id, is_jalapeno;
+       u64 pstate, ver, busy_mask;
+       int nack_busy_id, is_jbus, need_more;
 
        if (cpus_empty(mask))
                return;
@@ -452,11 +486,13 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
         * derivative processor.
         */
        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
-       is_jalapeno = ((ver >> 32) == 0x003e0016);
+       is_jbus = ((ver >> 32) == __JALAPENO_ID ||
+                  (ver >> 32) == __SERRANO_ID);
 
        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 
 retry:
+       need_more = 0;
        __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
                             : : "r" (pstate), "i" (PSTATE_IE));
 
@@ -471,46 +507,67 @@ retry:
                               "i" (ASI_INTR_W));
 
        nack_busy_id = 0;
+       busy_mask = 0;
        {
                int i;
 
                for_each_cpu_mask(i, mask) {
                        u64 target = (i << 14) | 0x70;
 
-                       if (!is_jalapeno)
+                       if (is_jbus) {
+                               busy_mask |= (0x1UL << (i * 2));
+                       } else {
                                target |= (nack_busy_id << 24);
+                               busy_mask |= (0x1UL <<
+                                             (nack_busy_id * 2));
+                       }
                        __asm__ __volatile__(
                                "stxa   %%g0, [%0] %1\n\t"
                                "membar #Sync\n\t"
                                : /* no outputs */
                                : "r" (target), "i" (ASI_INTR_W));
                        nack_busy_id++;
+                       if (nack_busy_id == 32) {
+                               need_more = 1;
+                               break;
+                       }
                }
        }
 
        /* Now, poll for completion. */
        {
-               u64 dispatch_stat;
+               u64 dispatch_stat, nack_mask;
                long stuck;
 
                stuck = 100000 * nack_busy_id;
+               nack_mask = busy_mask << 1;
                do {
                        __asm__ __volatile__("ldxa      [%%g0] %1, %0"
                                             : "=r" (dispatch_stat)
                                             : "i" (ASI_INTR_DISPATCH_STAT));
-                       if (dispatch_stat == 0UL) {
+                       if (!(dispatch_stat & (busy_mask | nack_mask))) {
                                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                                     : : "r" (pstate));
+                               if (unlikely(need_more)) {
+                                       int i, cnt = 0;
+                                       for_each_cpu_mask(i, mask) {
+                                               cpu_clear(i, mask);
+                                               cnt++;
+                                               if (cnt == 32)
+                                                       break;
+                                       }
+                                       goto retry;
+                               }
                                return;
                        }
                        if (!--stuck)
                                break;
-               } while (dispatch_stat & 0x5555555555555555UL);
+               } while (dispatch_stat & busy_mask);
 
                __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                                     : : "r" (pstate));
 
-               if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
+               if (dispatch_stat & busy_mask) {
                        /* Busy bits will not clear, continue instead
                         * of freezing up on this cpu.
                         */
@@ -530,7 +587,7 @@ retry:
                        for_each_cpu_mask(i, mask) {
                                u64 check_mask;
 
-                               if (is_jalapeno)
+                               if (is_jbus)
                                        check_mask = (0x2UL << (2*i));
                                else
                                        check_mask = (0x2UL <<
@@ -538,6 +595,8 @@ retry:
                                if ((dispatch_stat & check_mask) == 0)
                                        cpu_clear(i, mask);
                                this_busy_nack += 2;
+                               if (this_busy_nack == 64)
+                                       break;
                        }
 
                        goto retry;
@@ -545,6 +604,158 @@ retry:
        }
 }
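The busy_mask bookkeeping above replaces the old fixed constant 0x5555555555555555UL (every even bit set). The dispatch status register carries two bits per dispatch slot, which is also why the NACK mask is derived with a single shift:

    /* ASI_INTR_DISPATCH_STAT layout, two bits per slot n:
     *   bit 2n     = BUSY
     *   bit 2n + 1 = NACK
     * JBUS parts index slots by cpu id, other Cheetah variants by
     * nack_busy_id, hence the two busy_mask computations above.
     */
    nack_mask = busy_mask << 1;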
 
+/* Multi-cpu list version.  */
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+{
+       struct trap_per_cpu *tb;
+       u16 *cpu_list;
+       u64 *mondo;
+       cpumask_t error_mask;
+       unsigned long flags, status;
+       int cnt, retries, this_cpu, prev_sent, i;
+
+       if (cpus_empty(mask))
+               return;
+
+       /* We have to do this whole thing with interrupts fully disabled.
+        * Otherwise if we send an xcall from interrupt context it will
+        * corrupt both our mondo block and cpu list state.
+        *
+        * One consequence of this is that we cannot use timeout mechanisms
+        * that depend upon interrupts being delivered locally.  So, for
+        * example, we cannot sample jiffies and expect it to advance.
+        *
+        * Fortunately, udelay() uses %stick/%tick so we can use that.
+        */
+       local_irq_save(flags);
+
+       this_cpu = smp_processor_id();
+       tb = &trap_block[this_cpu];
+
+       mondo = __va(tb->cpu_mondo_block_pa);
+       mondo[0] = data0;
+       mondo[1] = data1;
+       mondo[2] = data2;
+       wmb();
+
+       cpu_list = __va(tb->cpu_list_pa);
+
+       /* Setup the initial cpu list.  */
+       cnt = 0;
+       for_each_cpu_mask(i, mask)
+               cpu_list[cnt++] = i;
+
+       cpus_clear(error_mask);
+       retries = 0;
+       prev_sent = 0;
+       do {
+               int forward_progress, n_sent;
+
+               status = sun4v_cpu_mondo_send(cnt,
+                                             tb->cpu_list_pa,
+                                             tb->cpu_mondo_block_pa);
+
+               /* HV_EOK means all cpus received the xcall, we're done.  */
+               if (likely(status == HV_EOK))
+                       break;
+
+               /* First, see if we made any forward progress.
+                *
+                * The hypervisor indicates successful sends by setting
+                * cpu list entries to the value 0xffff.
+                */
+               n_sent = 0;
+               for (i = 0; i < cnt; i++) {
+                       if (likely(cpu_list[i] == 0xffff))
+                               n_sent++;
+               }
+
+               forward_progress = 0;
+               if (n_sent > prev_sent)
+                       forward_progress = 1;
+
+               prev_sent = n_sent;
+
+               /* If we get a HV_ECPUERROR, then one or more of the cpus
+                * in the list are in error state.  Use the cpu_state()
+                * hypervisor call to find out which cpus are in error state.
+                */
+               if (unlikely(status == HV_ECPUERROR)) {
+                       for (i = 0; i < cnt; i++) {
+                               long err;
+                               u16 cpu;
+
+                               cpu = cpu_list[i];
+                               if (cpu == 0xffff)
+                                       continue;
+
+                               err = sun4v_cpu_state(cpu);
+                               if (err >= 0 &&
+                                   err == HV_CPU_STATE_ERROR) {
+                                       cpu_list[i] = 0xffff;
+                                       cpu_set(cpu, error_mask);
+                               }
+                       }
+               } else if (unlikely(status != HV_EWOULDBLOCK))
+                       goto fatal_mondo_error;
+
+               /* Don't bother rewriting the CPU list, just leave the
+                * 0xffff and non-0xffff entries in there and the
+                * hypervisor will do the right thing.
+                *
+                * Only advance timeout state if we didn't make any
+                * forward progress.
+                */
+               if (unlikely(!forward_progress)) {
+                       if (unlikely(++retries > 10000))
+                               goto fatal_mondo_timeout;
+
+                       /* Delay a little bit to let other cpus catch up
+                        * on their cpu mondo queue work.
+                        */
+                       udelay(2 * cnt);
+               }
+       } while (1);
+
+       local_irq_restore(flags);
+
+       if (unlikely(!cpus_empty(error_mask)))
+               goto fatal_mondo_cpu_error;
+
+       return;
+
+fatal_mondo_cpu_error:
+       printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
+              "were in error state\n",
+              this_cpu);
+       printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
+       for_each_cpu_mask(i, error_mask)
+               printk("%d ", i);
+       printk("]\n");
+       return;
+
+fatal_mondo_timeout:
+       local_irq_restore(flags);
+       printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
+              " progress after %d retries.\n",
+              this_cpu, retries);
+       goto dump_cpu_list_and_out;
+
+fatal_mondo_error:
+       local_irq_restore(flags);
+       printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
+              this_cpu, status);
+       printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
+              "mondo_block_pa(%lx)\n",
+              this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+
+dump_cpu_list_and_out:
+       printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
+       for (i = 0; i < cnt; i++)
+               printk("%u ", cpu_list[i]);
+       printk("]\n");
+}
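The retry loop above leans on the sun4v_cpu_mondo_send() return contract, summarized here from the in-line comments (see the UltraSPARC hypervisor API spec for the authoritative wording):

    /* HV_EOK         - every listed cpu received the mondo; done.
     * HV_EWOULDBLOCK - partial send; delivered entries are rewritten
     *                  to 0xffff and the same list may be resubmitted.
     * HV_ECPUERROR   - one or more listed cpus are in the error state;
     *                  probe each with sun4v_cpu_state().
     */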
+
 /* Send cross call to all processors mentioned in MASK
  * except self.
  */
@@ -558,8 +769,10 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 
        if (tlb_type == spitfire)
                spitfire_xcall_deliver(data0, data1, data2, mask);
-       else
+       else if (tlb_type == cheetah || tlb_type == cheetah_plus)
                cheetah_xcall_deliver(data0, data1, data2, mask);
+       else
+               hypervisor_xcall_deliver(data0, data1, data2, mask);
        /* NOTE: Caller runs local copy on master. */
 
        put_cpu();
@@ -586,24 +799,27 @@ struct call_data_struct {
        int wait;
 };
 
-static DEFINE_SPINLOCK(call_lock);
 static struct call_data_struct *call_data;
 
 extern unsigned long xcall_call_function;
 
-/*
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
+ *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *info), void *info,
-                     int nonatomic, int wait)
+static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info,
+                                         int wait, cpumask_t mask)
 {
        struct call_data_struct data;
-       int cpus = num_online_cpus() - 1;
-       long timeout;
-
-       if (!cpus)
-               return 0;
+       int cpus;
 
        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());
@@ -615,32 +831,29 @@ int smp_call_function(void (*func)(void *info), void *info,
 
        spin_lock(&call_lock);
 
+       cpu_clear(smp_processor_id(), mask);
+       cpus = cpus_weight(mask);
+       if (!cpus)
+               goto out_unlock;
+
        call_data = &data;
+       mb();
 
-       smp_cross_call(&xcall_call_function, 0, 0, 0);
+       smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
 
-       /* 
-        * Wait for other cpus to complete function or at
-        * least snap the call data.
-        */
-       timeout = 1000000;
-       while (atomic_read(&data.finished) != cpus) {
-               if (--timeout <= 0)
-                       goto out_timeout;
-               barrier();
-               udelay(1);
-       }
+       /* Wait for response */
+       while (atomic_read(&data.finished) != cpus)
+               cpu_relax();
 
+out_unlock:
        spin_unlock(&call_lock);
 
        return 0;
+}
 
-out_timeout:
-       spin_unlock(&call_lock);
-       printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
-              (long) num_online_cpus() - 1L,
-              (long) atomic_read(&data.finished));
-       return 0;
+int smp_call_function(void (*func)(void *info), void *info, int wait)
+{
+       return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map);
 }
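For callers the change is mechanical, since the dropped nonatomic argument was unused; a before/after sketch (do_flush_mm is a hypothetical callee, not from this patch):

    /* before: smp_call_function(func, info, nonatomic, wait) */
    err = smp_call_function(do_flush_mm, mm, 0, 1);

    /* after:  smp_call_function(func, info, wait) */
    err = smp_call_function(do_flush_mm, mm, 1);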
 
 void smp_call_function_client(int irq, struct pt_regs *regs)
@@ -660,13 +873,38 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
        }
 }
 
+static void tsb_sync(void *info)
+{
+       struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
+       struct mm_struct *mm = info;
+
+       /* It is not valid to test "current->active_mm == mm" here.
+        *
+        * The value of "current" is not changed atomically with
+        * switch_mm().  But that's OK, we just need to check the
+        * current cpu's trap block PGD physical address.
+        */
+       if (tp->pgd_paddr == __pa(mm->pgd))
+               tsb_context_switch(mm);
+}
+
+void smp_tsb_sync(struct mm_struct *mm)
+{
+       sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask);
+}
+
 extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
-extern unsigned long xcall_flush_tlb_all_spitfire;
-extern unsigned long xcall_flush_tlb_all_cheetah;
 extern unsigned long xcall_report_regs;
+#ifdef CONFIG_MAGIC_SYSRQ
+extern unsigned long xcall_fetch_glob_regs;
+#endif
 extern unsigned long xcall_receive_signal;
+extern unsigned long xcall_new_mmu_context_version;
+#ifdef CONFIG_KGDB
+extern unsigned long xcall_kgdb_capture;
+#endif
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -678,7 +916,7 @@ extern atomic_t dcpage_flushes;
 extern atomic_t dcpage_flushes_xcall;
 #endif
 
-static __inline__ void __local_flush_dcache_page(struct page *page)
+static inline void __local_flush_dcache_page(struct page *page)
 {
 #ifdef DCACHE_ALIASING_POSSIBLE
        __flush_dcache_page(page_address(page),
@@ -694,11 +932,17 @@ static __inline__ void __local_flush_dcache_page(struct page *page)
 void smp_flush_dcache_page_impl(struct page *page, int cpu)
 {
        cpumask_t mask = cpumask_of_cpu(cpu);
-       int this_cpu = get_cpu();
+       int this_cpu;
+
+       if (tlb_type == hypervisor)
+               return;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
 #endif
+
+       this_cpu = get_cpu();
+
        if (cpu == this_cpu) {
                __local_flush_dcache_page(page);
        } else if (cpu_online(cpu)) {
@@ -714,7 +958,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
                                               __pa(pg_addr),
                                               (u64) pg_addr,
                                               mask);
-               } else {
+               } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
                        data0 =
                                ((u64)&xcall_flush_dcache_page_cheetah);
@@ -736,7 +980,12 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        void *pg_addr = page_address(page);
        cpumask_t mask = cpu_online_map;
        u64 data0;
-       int this_cpu = get_cpu();
+       int this_cpu;
+
+       if (tlb_type == hypervisor)
+               return;
+
+       this_cpu = get_cpu();
 
        cpu_clear(this_cpu, mask);
 
@@ -753,7 +1002,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
                                       __pa(pg_addr),
                                       (u64) pg_addr,
                                       mask);
-       } else {
+       } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
                data0 = ((u64)&xcall_flush_dcache_page_cheetah);
                cheetah_xcall_deliver(data0,
@@ -770,39 +1019,73 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        put_cpu();
 }
 
+static void __smp_receive_signal_mask(cpumask_t mask)
+{
+       smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
+}
+
 void smp_receive_signal(int cpu)
 {
        cpumask_t mask = cpumask_of_cpu(cpu);
 
-       if (cpu_online(cpu)) {
-               u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
-
-               if (tlb_type == spitfire)
-                       spitfire_xcall_deliver(data0, 0, 0, mask);
-               else
-                       cheetah_xcall_deliver(data0, 0, 0, mask);
-       }
+       if (cpu_online(cpu))
+               __smp_receive_signal_mask(mask);
 }
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
-       /* Just return, rtrap takes care of the rest. */
        clear_softint(1 << irq);
 }
 
+void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+{
+       struct mm_struct *mm;
+       unsigned long flags;
+
+       clear_softint(1 << irq);
+
+       /* See if we need to allocate a new TLB context because
+        * the version of the one we are using is now out of date.
+        */
+       mm = current->active_mm;
+       if (unlikely(!mm || (mm == &init_mm)))
+               return;
+
+       spin_lock_irqsave(&mm->context.lock, flags);
+
+       if (unlikely(!CTX_VALID(mm->context)))
+               get_new_mmu_context(mm);
+
+       spin_unlock_irqrestore(&mm->context.lock, flags);
+
+       load_secondary_context(mm);
+       __flush_tlb_mm(CTX_HWBITS(mm->context),
+                      SECONDARY_CONTEXT);
+}
+
+void smp_new_mmu_context_version(void)
+{
+       smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
+}
+
+#ifdef CONFIG_KGDB
+void kgdb_roundup_cpus(unsigned long flags)
+{
+       smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
+}
+#endif
+
 void smp_report_regs(void)
 {
        smp_cross_call(&xcall_report_regs, 0, 0, 0);
 }
 
-void smp_flush_tlb_all(void)
+#ifdef CONFIG_MAGIC_SYSRQ
+void smp_fetch_global_regs(void)
 {
-       if (tlb_type == spitfire)
-               smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
-       else
-               smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
-       __flush_tlb_all();
+       smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
 }
+#endif
 
 /* We know that the window frames of the user have been flushed
  * to the stack before we get here because all callers of us
@@ -945,227 +1228,208 @@ void smp_release(void)
  * can service tlb flush xcalls...
  */
 extern void prom_world(int);
-extern void save_alternate_globals(unsigned long *);
-extern void restore_alternate_globals(unsigned long *);
+
 void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
-       unsigned long global_save[24];
-
        clear_softint(1 << irq);
 
        preempt_disable();
 
        __asm__ __volatile__("flushw");
-       save_alternate_globals(global_save);
        prom_world(1);
        atomic_inc(&smp_capture_registry);
        membar_storeload_storestore();
        while (penguins_are_doing_time)
                rmb();
-       restore_alternate_globals(global_save);
        atomic_dec(&smp_capture_registry);
        prom_world(0);
 
        preempt_enable();
 }
 
-#define prof_multiplier(__cpu)         cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)            cpu_data(__cpu).counter
+/* /proc/profile writes can call this, don't __init it please. */
+int setup_profiling_timer(unsigned int multiplier)
+{
+       return -EINVAL;
+}
 
-void smp_percpu_timer_interrupt(struct pt_regs *regs)
+void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       unsigned long compare, tick, pstate;
-       int cpu = smp_processor_id();
-       int user = user_mode(regs);
+}
 
-       /*
-        * Check for level 14 softint.
-        */
-       {
-               unsigned long tick_mask = tick_ops->softint_mask;
+void __devinit smp_prepare_boot_cpu(void)
+{
+}
+
+void __devinit smp_fill_in_sib_core_maps(void)
+{
+       unsigned int i;
 
-               if (!(get_softint() & tick_mask)) {
-                       extern void handler_irq(int, struct pt_regs *);
+       for_each_present_cpu(i) {
+               unsigned int j;
 
-                       handler_irq(14, regs);
-                       return;
+               cpus_clear(cpu_core_map[i]);
+               if (cpu_data(i).core_id == 0) {
+                       cpu_set(i, cpu_core_map[i]);
+                       continue;
+               }
+
+               for_each_present_cpu(j) {
+                       if (cpu_data(i).core_id ==
+                           cpu_data(j).core_id)
+                               cpu_set(j, cpu_core_map[i]);
                }
-               clear_softint(tick_mask);
        }
 
-       do {
-               profile_tick(CPU_PROFILING, regs);
-               if (!--prof_counter(cpu)) {
-                       irq_enter();
+       for_each_present_cpu(i) {
+               unsigned int j;
 
-                       if (cpu == boot_cpu_id) {
-                               kstat_this_cpu.irqs[0]++;
-                               timer_tick_interrupt(regs);
-                       }
+               cpus_clear(per_cpu(cpu_sibling_map, i));
+               if (cpu_data(i).proc_id == -1) {
+                       cpu_set(i, per_cpu(cpu_sibling_map, i));
+                       continue;
+               }
 
-                       update_process_times(user);
+               for_each_present_cpu(j) {
+                       if (cpu_data(i).proc_id ==
+                           cpu_data(j).proc_id)
+                               cpu_set(j, per_cpu(cpu_sibling_map, i));
+               }
+       }
+}
 
-                       irq_exit();
+int __cpuinit __cpu_up(unsigned int cpu)
+{
+       int ret = smp_boot_one_cpu(cpu);
 
-                       prof_counter(cpu) = prof_multiplier(cpu);
+       if (!ret) {
+               cpu_set(cpu, smp_commenced_mask);
+               while (!cpu_isset(cpu, cpu_online_map))
+                       mb();
+               if (!cpu_isset(cpu, cpu_online_map)) {
+                       ret = -ENODEV;
+               } else {
+                       /* On SUN4V, writes to %tick and %stick are
+                        * not allowed.
+                        */
+                       if (tlb_type != hypervisor)
+                               smp_synchronize_one_tick(cpu);
                }
-
-               /* Guarantee that the following sequences execute
-                * uninterrupted.
-                */
-               __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
-                                    "wrpr      %0, %1, %%pstate"
-                                    : "=r" (pstate)
-                                    : "i" (PSTATE_IE));
-
-               compare = tick_ops->add_compare(current_tick_offset);
-               tick = tick_ops->get_tick();
-
-               /* Restore PSTATE_IE. */
-               __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
-                                    : /* no outputs */
-                                    : "r" (pstate));
-       } while (time_after_eq(tick, compare));
+       }
+       return ret;
 }
 
-static void __init smp_setup_percpu_timer(void)
+#ifdef CONFIG_HOTPLUG_CPU
+void cpu_play_dead(void)
 {
        int cpu = smp_processor_id();
        unsigned long pstate;
 
-       prof_counter(cpu) = prof_multiplier(cpu) = 1;
+       idle_task_exit();
 
-       /* Guarantee that the following sequences execute
-        * uninterrupted.
-        */
-       __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
-                            "wrpr      %0, %1, %%pstate"
-                            : "=r" (pstate)
-                            : "i" (PSTATE_IE));
+       if (tlb_type == hypervisor) {
+               struct trap_per_cpu *tb = &trap_block[cpu];
 
-       tick_ops->init_tick(current_tick_offset);
+               sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
+                               tb->cpu_mondo_pa, 0);
+               sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
+                               tb->dev_mondo_pa, 0);
+               sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
+                               tb->resum_mondo_pa, 0);
+               sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
+                               tb->nonresum_mondo_pa, 0);
+       }
 
-       /* Restore PSTATE_IE. */
-       __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
-                            : /* no outputs */
-                            : "r" (pstate));
-}
+       cpu_clear(cpu, smp_commenced_mask);
+       membar_safe("#Sync");
 
-void __init smp_tick_init(void)
-{
-       boot_cpu_id = hard_smp_processor_id();
-       current_tick_offset = timer_tick_offset;
+       local_irq_disable();
 
-       cpu_set(boot_cpu_id, cpu_online_map);
-       prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
-}
+       __asm__ __volatile__(
+               "rdpr   %%pstate, %0\n\t"
+               "wrpr   %0, %1, %%pstate"
+               : "=r" (pstate)
+               : "i" (PSTATE_IE));
 
-/* /proc/profile writes can call this, don't __init it please. */
-static DEFINE_SPINLOCK(prof_setup_lock);
+       while (1)
+               barrier();
+}
 
-int setup_profiling_timer(unsigned int multiplier)
+int __cpu_disable(void)
 {
-       unsigned long flags;
+       int cpu = smp_processor_id();
+       cpuinfo_sparc *c;
        int i;
 
-       if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
-               return -EINVAL;
+       for_each_cpu_mask(i, cpu_core_map[cpu])
+               cpu_clear(cpu, cpu_core_map[i]);
+       cpus_clear(cpu_core_map[cpu]);
 
-       spin_lock_irqsave(&prof_setup_lock, flags);
-       for (i = 0; i < NR_CPUS; i++)
-               prof_multiplier(i) = multiplier;
-       current_tick_offset = (timer_tick_offset / multiplier);
-       spin_unlock_irqrestore(&prof_setup_lock, flags);
+       for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
+       cpus_clear(per_cpu(cpu_sibling_map, cpu));
 
-       return 0;
-}
+       c = &cpu_data(cpu);
 
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
-       int instance, mid;
+       c->core_id = 0;
+       c->proc_id = -1;
 
-       instance = 0;
-       while (!cpu_find_by_instance(instance, NULL, &mid)) {
-               if (mid < max_cpus)
-                       cpu_set(mid, phys_cpu_present_map);
-               instance++;
-       }
-
-       if (num_possible_cpus() > max_cpus) {
-               instance = 0;
-               while (!cpu_find_by_instance(instance, NULL, &mid)) {
-                       if (mid != boot_cpu_id) {
-                               cpu_clear(mid, phys_cpu_present_map);
-                               if (num_possible_cpus() <= max_cpus)
-                                       break;
-                       }
-                       instance++;
-               }
-       }
+       spin_lock(&call_lock);
+       cpu_clear(cpu, cpu_online_map);
+       spin_unlock(&call_lock);
 
-       smp_store_cpu_info(boot_cpu_id);
-}
+       smp_wmb();
 
-void __devinit smp_prepare_boot_cpu(void)
-{
-       if (hard_smp_processor_id() >= NR_CPUS) {
-               prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
-               prom_halt();
-       }
+       /* Make sure no interrupts point to this cpu.  */
+       fixup_irqs();
 
-       current_thread_info()->cpu = hard_smp_processor_id();
+       local_irq_enable();
+       mdelay(1);
+       local_irq_disable();
 
-       cpu_set(smp_processor_id(), cpu_online_map);
-       cpu_set(smp_processor_id(), phys_cpu_present_map);
+       return 0;
 }
 
-int __devinit __cpu_up(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
-       int ret = smp_boot_one_cpu(cpu);
+       int i;
 
-       if (!ret) {
-               cpu_set(cpu, smp_commenced_mask);
-               while (!cpu_isset(cpu, cpu_online_map))
-                       mb();
-               if (!cpu_isset(cpu, cpu_online_map)) {
-                       ret = -ENODEV;
-               } else {
-                       smp_synchronize_one_tick(cpu);
+       for (i = 0; i < 100; i++) {
+               smp_rmb();
+               if (!cpu_isset(cpu, smp_commenced_mask))
+                       break;
+               msleep(100);
+       }
+       if (cpu_isset(cpu, smp_commenced_mask)) {
+               printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+       } else {
+#if defined(CONFIG_SUN_LDOMS)
+               unsigned long hv_err;
+               int limit = 100;
+
+               do {
+                       hv_err = sun4v_cpu_stop(cpu);
+                       if (hv_err == HV_EOK) {
+                               cpu_clear(cpu, cpu_present_map);
+                               break;
+                       }
+               } while (--limit > 0);
+               if (limit <= 0) {
+                       printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
+                              hv_err);
                }
+#endif
        }
-       return ret;
 }
+#endif
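One detail in cpu_play_dead() above: the third argument to sun4v_cpu_qconf() is the queue entry count, and per the hypervisor cpu_qconf call a count of zero unconfigures the queue (stated from the sun4v spec, so treat it as an assumption rather than something this patch documents):

    /* num_entries == 0 unconfigures the queue before the cpu parks. */
    sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, tb->cpu_mondo_pa, 0);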
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-       unsigned long bogosum = 0;
-       int i;
-
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_online(i))
-                       bogosum += cpu_data(i).udelay_val;
-       }
-       printk("Total of %ld processors activated "
-              "(%lu.%02lu BogoMIPS).\n",
-              (long) num_online_cpus(),
-              bogosum/(500000/HZ),
-              (bogosum/(5000/HZ))%100);
 }
 
-/* This needn't do anything as we do not sleep the cpu
- * inside of the idler task, so an interrupt is not needed
- * to get a clean fast response.
- *
- * XXX Reverify this assumption... -DaveM
- *
- * Addendum: We do want it to do something for the signal
- *           delivery case, we detect that by just seeing
- *           if we are trying to send this to an idler or not.
- */
 void smp_send_reschedule(int cpu)
 {
-       if (cpu_data(cpu).idle_volume == 0)
-               smp_receive_signal(cpu);
+       smp_receive_signal(cpu);
 }
 
 /* This is a nop because we capture all other cpus
@@ -1181,49 +1445,30 @@ unsigned long __per_cpu_shift __read_mostly;
 EXPORT_SYMBOL(__per_cpu_base);
 EXPORT_SYMBOL(__per_cpu_shift);
 
-void __init setup_per_cpu_areas(void)
+void __init real_setup_per_cpu_areas(void)
 {
-       unsigned long goal, size, i;
+       unsigned long paddr, goal, size, i;
        char *ptr;
-       /* Created by linker magic */
-       extern char __per_cpu_start[], __per_cpu_end[];
 
        /* Copy section for each CPU (we discard the original) */
-       goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
+       goal = PERCPU_ENOUGH_ROOM;
 
-#ifdef CONFIG_MODULES
-       if (goal < PERCPU_ENOUGH_ROOM)
-               goal = PERCPU_ENOUGH_ROOM;
-#endif
-       __per_cpu_shift = 0;
-       for (size = 1UL; size < goal; size <<= 1UL)
+       __per_cpu_shift = PAGE_SHIFT;
+       for (size = PAGE_SIZE; size < goal; size <<= 1UL)
                __per_cpu_shift++;
 
-       /* Make sure the resulting __per_cpu_base value
-        * will fit in the 43-bit sign extended IMMU
-        * TSB register.
-        */
-       ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
-                             (unsigned long) __per_cpu_start);
-
-       __per_cpu_base = ptr - __per_cpu_start;
-
-       if ((__per_cpu_shift < PAGE_SHIFT) ||
-           (__per_cpu_base & ~PAGE_MASK) ||
-           (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
-               prom_printf("PER_CPU: Invalid layout, "
-                           "ptr[%p] shift[%lx] base[%lx]\n",
-                           ptr, __per_cpu_shift, __per_cpu_base);
+       paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
+       if (!paddr) {
+               prom_printf("Cannot allocate per-cpu memory.\n");
                prom_halt();
        }
 
+       ptr = __va(paddr);
+       __per_cpu_base = ptr - __per_cpu_start;
+
        for (i = 0; i < NR_CPUS; i++, ptr += size)
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 
-       /* Finally, load in the boot cpu's base value.
-        * We abuse the IMMU TSB register for trap handler
-        * entry and exit loading of %g5.  That is why it
-        * has to be page aligned.
-        */
-       cpu_setup_percpu_base(hard_smp_processor_id());
+       /* Setup %g5 for the boot cpu.  */
+       __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
 }