diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 7d7e02b..cc09d82 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <asm/cpudata.h>
 
 #include <asm/irq.h>
+#include <asm/irq_regs.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/oplib.h>
@@ -39,6 +40,7 @@
 #include <asm/starfire.h>
 #include <asm/tlb.h>
 #include <asm/sections.h>
+#include <asm/prom.h>
 
 extern void calibrate_delay(void);
 
@@ -47,6 +49,8 @@ static unsigned char boot_cpu_id;
 
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
+       { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
 
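The cpu_sibling_map initializer added above uses a GCC range designator, [first ... last] = value, to set every array element to CPU_MASK_NONE. A stand-alone illustration of that extension, with a hypothetical SIZE standing in for NR_CPUS:

#include <stdio.h>

#define SIZE 8  /* hypothetical stand-in for NR_CPUS */

/* GCC range designator: every element starts out as -1, just as
 * every cpu_sibling_map[] slot above starts out as CPU_MASK_NONE. */
static int mask[SIZE] = { [0 ... SIZE - 1] = -1 };

int main(void)
{
        for (int i = 0; i < SIZE; i++)
                printf("mask[%d] = %d\n", i, mask[i]);
        return 0;
}
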
@@ -55,53 +59,62 @@ void smp_info(struct seq_file *m)
        int i;
        
        seq_printf(m, "State:\n");
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_online(i))
-                       seq_printf(m,
-                                  "CPU%d:\t\tonline\n", i);
-       }
+       for_each_online_cpu(i)
+               seq_printf(m, "CPU%d:\t\tonline\n", i);
 }
 
 void smp_bogo(struct seq_file *m)
 {
        int i;
        
-       for (i = 0; i < NR_CPUS; i++)
-               if (cpu_online(i))
-                       seq_printf(m,
-                                  "Cpu%dBogo\t: %lu.%02lu\n"
-                                  "Cpu%dClkTck\t: %016lx\n",
-                                  i, cpu_data(i).udelay_val / (500000/HZ),
-                                  (cpu_data(i).udelay_val / (5000/HZ)) % 100,
-                                  i, cpu_data(i).clock_tick);
+       for_each_online_cpu(i)
+               seq_printf(m,
+                          "Cpu%dBogo\t: %lu.%02lu\n"
+                          "Cpu%dClkTck\t: %016lx\n",
+                          i, cpu_data(i).udelay_val / (500000/HZ),
+                          (cpu_data(i).udelay_val / (5000/HZ)) % 100,
+                          i, cpu_data(i).clock_tick);
 }
 
 void __init smp_store_cpu_info(int id)
 {
-       int cpu_node;
+       struct device_node *dp;
+       int def;
 
        /* multiplier and counter set by
           smp_setup_percpu_timer()  */
        cpu_data(id).udelay_val                 = loops_per_jiffy;
 
-       cpu_find_by_mid(id, &cpu_node);
-       cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
-                                                    "clock-frequency", 0);
+       cpu_find_by_mid(id, &dp);
+       cpu_data(id).clock_tick =
+               of_getintprop_default(dp, "clock-frequency", 0);
 
-       cpu_data(id).idle_volume                = 1;
+       def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
+       cpu_data(id).dcache_size =
+               of_getintprop_default(dp, "dcache-size", def);
 
-       cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
-                                                     16 * 1024);
+       def = 32;
        cpu_data(id).dcache_line_size =
-               prom_getintdefault(cpu_node, "dcache-line-size", 32);
-       cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
-                                                     16 * 1024);
+               of_getintprop_default(dp, "dcache-line-size", def);
+
+       def = 16 * 1024;
+       cpu_data(id).icache_size =
+               of_getintprop_default(dp, "icache-size", def);
+
+       def = 32;
        cpu_data(id).icache_line_size =
-               prom_getintdefault(cpu_node, "icache-line-size", 32);
-       cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
-                                                     4 * 1024 * 1024);
+               of_getintprop_default(dp, "icache-line-size", def);
+
+       def = ((tlb_type == hypervisor) ?
+              (3 * 1024 * 1024) :
+              (4 * 1024 * 1024));
+       cpu_data(id).ecache_size =
+               of_getintprop_default(dp, "ecache-size", def);
+
+       def = 64;
        cpu_data(id).ecache_line_size =
-               prom_getintdefault(cpu_node, "ecache-line-size", 64);
+               of_getintprop_default(dp, "ecache-line-size", def);
+
        printk("CPU[%d]: Caches "
               "D[sz(%d):line_sz(%d)] "
               "I[sz(%d):line_sz(%d)] "
@@ -122,10 +135,8 @@ void __init smp_callin(void)
 
        __local_per_cpu_offset = __per_cpu_offset(cpuid);
 
-       if (tlb_type == hypervisor) {
-               sun4v_register_fault_status();
+       if (tlb_type == hypervisor)
                sun4v_ktsb_register();
-       }
 
        __flush_tlb_all();
 
@@ -304,6 +315,8 @@ static void smp_synchronize_one_tick(int cpu)
        spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
+extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
+
 extern unsigned long sparc64_cpu_startup;
 
 /* The OBP cpu startup callback truncates the 3rd arg cookie to
@@ -319,21 +332,31 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
        unsigned long cookie =
                (unsigned long)(&cpu_new_thread);
        struct task_struct *p;
-       int timeout, ret, cpu_node;
+       int timeout, ret;
 
        p = fork_idle(cpu);
        callin_flag = 0;
        cpu_new_thread = task_thread_info(p);
        cpu_set(cpu, cpu_callout_map);
 
-       cpu_find_by_mid(cpu, &cpu_node);
-       prom_startcpu(cpu_node, entry, cookie);
+       if (tlb_type == hypervisor) {
+               /* Allocate the mondo queues; the cpu will load them.  */
+               sun4v_init_mondo_queues(0, cpu, 1, 0);
+
+               prom_startcpu_cpuid(cpu, entry, cookie);
+       } else {
+               struct device_node *dp;
+
+               cpu_find_by_mid(cpu, &dp);
+               prom_startcpu(dp->node, entry, cookie);
+       }
 
        for (timeout = 0; timeout < 5000000; timeout++) {
                if (callin_flag)
                        break;
                udelay(100);
        }
+
        if (callin_flag) {
                ret = 0;
        } else {
@@ -533,133 +556,154 @@ retry:
        }
 }
 
-#if 0
 /* Multi-cpu list version.  */
-static int init_cpu_list(u16 *list, cpumask_t mask)
-{
-       int i, cnt;
-
-       cnt = 0;
-       for_each_cpu_mask(i, mask)
-               list[cnt++] = i;
-
-       return cnt;
-}
-
-static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask)
-{
-       int i;
-
-       for (i = 0; i < orig_cnt; i++) {
-               if (list[i] == 0xffff)
-                       cpu_clear(i, mask);
-       }
-
-       return init_cpu_list(list, mask);
-}
-
 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
-       int this_cpu = get_cpu();
-       struct trap_per_cpu *tb = &trap_block[this_cpu];
-       u64 *mondo = __va(tb->cpu_mondo_block_pa);
-       u16 *cpu_list = __va(tb->cpu_list_pa);
-       int cnt, retries;
+       struct trap_per_cpu *tb;
+       u16 *cpu_list;
+       u64 *mondo;
+       cpumask_t error_mask;
+       unsigned long flags, status;
+       int cnt, retries, this_cpu, prev_sent, i;
+
+       /* We have to do this whole thing with interrupts fully disabled.
+        * Otherwise if we send an xcall from interrupt context it will
+        * corrupt both our mondo block and cpu list state.
+        *
+        * One consequence of this is that we cannot use timeout mechanisms
+        * that depend upon interrupts being delivered locally.  So, for
+        * example, we cannot sample jiffies and expect it to advance.
+        *
+        * Fortunately, udelay() uses %stick/%tick so we can use that.
+        */
+       local_irq_save(flags);
+
+       this_cpu = smp_processor_id();
+       tb = &trap_block[this_cpu];
 
+       mondo = __va(tb->cpu_mondo_block_pa);
        mondo[0] = data0;
        mondo[1] = data1;
        mondo[2] = data2;
        wmb();
 
+       cpu_list = __va(tb->cpu_list_pa);
+
+       /* Setup the initial cpu list.  */
+       cnt = 0;
+       for_each_cpu_mask(i, mask)
+               cpu_list[cnt++] = i;
+
+       cpus_clear(error_mask);
        retries = 0;
-       cnt = init_cpu_list(cpu_list, mask);
+       prev_sent = 0;
        do {
-               register unsigned long func __asm__("%o5");
-               register unsigned long arg0 __asm__("%o0");
-               register unsigned long arg1 __asm__("%o1");
-               register unsigned long arg2 __asm__("%o2");
-
-               func = HV_FAST_CPU_MONDO_SEND;
-               arg0 = cnt;
-               arg1 = tb->cpu_list_pa;
-               arg2 = tb->cpu_mondo_block_pa;
-
-               __asm__ __volatile__("ta        %8"
-                                    : "=&r" (func), "=&r" (arg0),
-                                      "=&r" (arg1), "=&r" (arg2)
-                                    : "0" (func), "1" (arg0),
-                                      "2" (arg1), "3" (arg2),
-                                      "i" (HV_FAST_TRAP)
-                                    : "memory");
-               if (likely(func == HV_EOK))
-                       break;
+               int forward_progress, n_sent;
 
-               if (unlikely(++retries > 100)) {
-                       printk("CPU[%d]: sun4v mondo error %lu\n",
-                              this_cpu, func);
+               status = sun4v_cpu_mondo_send(cnt,
+                                             tb->cpu_list_pa,
+                                             tb->cpu_mondo_block_pa);
+
+               /* HV_EOK means all cpus received the xcall, we're done.  */
+               if (likely(status == HV_EOK))
                        break;
+
+               /* First, see if we made any forward progress.
+                *
+                * The hypervisor indicates successful sends by setting
+                * cpu list entries to the value 0xffff.
+                */
+               n_sent = 0;
+               for (i = 0; i < cnt; i++) {
+                       if (likely(cpu_list[i] == 0xffff))
+                               n_sent++;
                }
 
-               cnt = update_cpu_list(cpu_list, cnt, mask);
+               forward_progress = 0;
+               if (n_sent > prev_sent)
+                       forward_progress = 1;
 
-               udelay(2 * cnt);
-       } while (1);
+               prev_sent = n_sent;
 
-       put_cpu();
-}
-#else
-/* Single-cpu list version.  */
-static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
-{
-       int this_cpu = get_cpu();
-       struct trap_per_cpu *tb = &trap_block[this_cpu];
-       u64 *mondo = __va(tb->cpu_mondo_block_pa);
-       u16 *cpu_list = __va(tb->cpu_list_pa);
-       int i;
+               /* If we get a HV_ECPUERROR, then one or more of the cpus
+                * in the list are in error state.  Use the cpu_state()
+                * hypervisor call to find out which cpus are in error state.
+                */
+               if (unlikely(status == HV_ECPUERROR)) {
+                       for (i = 0; i < cnt; i++) {
+                               long err;
+                               u16 cpu;
+
+                               cpu = cpu_list[i];
+                               if (cpu == 0xffff)
+                                       continue;
+
+                               err = sun4v_cpu_state(cpu);
+                               if (err >= 0 &&
+                                   err == HV_CPU_STATE_ERROR) {
+                                       cpu_list[i] = 0xffff;
+                                       cpu_set(cpu, error_mask);
+                               }
+                       }
+               } else if (unlikely(status != HV_EWOULDBLOCK))
+                       goto fatal_mondo_error;
+
+               /* Don't bother rewriting the CPU list, just leave the
+                * 0xffff and non-0xffff entries in there and the
+                * hypervisor will do the right thing.
+                *
+                * Only advance timeout state if we didn't make any
+                * forward progress.
+                */
+               if (unlikely(!forward_progress)) {
+                       if (unlikely(++retries > 10000))
+                               goto fatal_mondo_timeout;
 
-       mondo[0] = data0;
-       mondo[1] = data1;
-       mondo[2] = data2;
-       wmb();
+                       /* Delay a little bit to let other cpus catch up
+                        * on their cpu mondo queue work.
+                        */
+                       udelay(2 * cnt);
+               }
+       } while (1);
 
-       for_each_cpu_mask(i, mask) {
-               int retries = 0;
+       local_irq_restore(flags);
 
-               do {
-                       register unsigned long func __asm__("%o5");
-                       register unsigned long arg0 __asm__("%o0");
-                       register unsigned long arg1 __asm__("%o1");
-                       register unsigned long arg2 __asm__("%o2");
-
-                       cpu_list[0] = i;
-                       func = HV_FAST_CPU_MONDO_SEND;
-                       arg0 = 1;
-                       arg1 = tb->cpu_list_pa;
-                       arg2 = tb->cpu_mondo_block_pa;
-
-                       __asm__ __volatile__("ta        %8"
-                                            : "=&r" (func), "=&r" (arg0),
-                                              "=&r" (arg1), "=&r" (arg2)
-                                            : "0" (func), "1" (arg0),
-                                              "2" (arg1), "3" (arg2),
-                                              "i" (HV_FAST_TRAP)
-                                            : "memory");
-                       if (likely(func == HV_EOK))
-                               break;
+       if (unlikely(!cpus_empty(error_mask)))
+               goto fatal_mondo_cpu_error;
 
-                       if (unlikely(++retries > 100)) {
-                               printk("CPU[%d]: sun4v mondo error %lu\n",
-                                      this_cpu, func);
-                               break;
-                       }
+       return;
 
-                       udelay(2 * i);
-               } while (1);
-       }
+fatal_mondo_cpu_error:
+       printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
+              "were in error state\n",
+              this_cpu);
+       printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
+       for_each_cpu_mask(i, error_mask)
+               printk("%d ", i);
+       printk("]\n");
+       return;
 
-       put_cpu();
+fatal_mondo_timeout:
+       local_irq_restore(flags);
+       printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
+              " progress after %d retries.\n",
+              this_cpu, retries);
+       goto dump_cpu_list_and_out;
+
+fatal_mondo_error:
+       local_irq_restore(flags);
+       printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
+              this_cpu, status);
+       printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
+              "mondo_block_pa(%lx)\n",
+              this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+
+dump_cpu_list_and_out:
+       printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
+       for (i = 0; i < cnt; i++)
+               printk("%u ", cpu_list[i]);
+       printk("]\n");
 }
-#endif
 
 /* Send cross call to all processors mentioned in MASK
  * except self.
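The rewritten multi-cpu delivery loop above has three rules worth restating: entries the hypervisor has accepted are marked 0xffff and the list is resent as-is; a retry only counts against the timeout when no new cpu was reached since the last pass; and cpus reported in error state are pulled out of the list rather than retried forever. A condensed user-space model of the forward-progress rule, with a toy try_send() standing in for sun4v_cpu_mondo_send():

#include <stdio.h>

#define SENT            0xffff  /* hypervisor marks delivered entries */
#define MAX_RETRIES     10000

/* Toy stand-in for sun4v_cpu_mondo_send(): delivers to one pending
 * cpu per pass and returns how many are still pending (0 plays the
 * role of HV_EOK). */
static int try_send(unsigned short *list, int cnt)
{
        int pending = 0;

        for (int i = 0; i < cnt; i++) {
                if (list[i] != SENT) {
                        list[i] = SENT;         /* deliver one ... */
                        break;
                }
        }
        for (int i = 0; i < cnt; i++)
                if (list[i] != SENT)
                        pending++;              /* ... count the rest */
        return pending;
}

static int deliver(unsigned short *list, int cnt)
{
        int retries = 0, prev_sent = 0;

        while (try_send(list, cnt) != 0) {
                int n_sent = 0;

                for (int i = 0; i < cnt; i++)
                        if (list[i] == SENT)
                                n_sent++;

                /* Charge the timeout only when this pass made no
                 * forward progress at all. */
                if (n_sent <= prev_sent && ++retries > MAX_RETRIES)
                        return -1;
                prev_sent = n_sent;
        }
        return 0;
}

int main(void)
{
        unsigned short cpus[] = { 1, 2, 5, 6 };

        printf("deliver: %d\n", deliver(cpus, 4));
        return 0;
}
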
@@ -704,12 +748,21 @@ struct call_data_struct {
        int wait;
 };
 
-static DEFINE_SPINLOCK(call_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
 static struct call_data_struct *call_data;
 
 extern unsigned long xcall_call_function;
 
-/*
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
+ *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
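As the kerneldoc above says, callers must be in process context with interrupts enabled, and func runs on the remote cpus in interrupt context. A hedged usage sketch against the four-argument signature of this era; the drain helper is hypothetical:

#include <linux/smp.h>

/* Runs on every other cpu in interrupt context: must be fast
 * and must not sleep. */
static void drain_local_state(void *info)
{
        /* ... flush this cpu's share of the work ... */
}

static void drain_all_cpus(void)
{
        /* wait=1: do not return until every remote cpu has run
         * drain_local_state(); nonatomic is unused. */
        smp_call_function(drain_local_state, NULL, 0, 1);
        drain_local_state(NULL);        /* and the local cpu too */
}
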
@@ -717,11 +770,7 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
                                  int nonatomic, int wait, cpumask_t mask)
 {
        struct call_data_struct data;
-       int cpus = cpus_weight(mask) - 1;
-       long timeout;
-
-       if (!cpus)
-               return 0;
+       int cpus;
 
        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());
@@ -733,32 +782,24 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 
        spin_lock(&call_lock);
 
+       cpu_clear(smp_processor_id(), mask);
+       cpus = cpus_weight(mask);
+       if (!cpus)
+               goto out_unlock;
+
        call_data = &data;
+       mb();
 
        smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
 
-       /* 
-        * Wait for other cpus to complete function or at
-        * least snap the call data.
-        */
-       timeout = 1000000;
-       while (atomic_read(&data.finished) != cpus) {
-               if (--timeout <= 0)
-                       goto out_timeout;
-               barrier();
-               udelay(1);
-       }
+       /* Wait for response */
+       while (atomic_read(&data.finished) != cpus)
+               cpu_relax();
 
+out_unlock:
        spin_unlock(&call_lock);
 
        return 0;
-
-out_timeout:
-       spin_unlock(&call_lock);
-       printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
-              (long) num_online_cpus() - 1L,
-              (long) atomic_read(&data.finished));
-       return 0;
 }
 
 int smp_call_function(void (*func)(void *info), void *info,
@@ -787,9 +828,16 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
 
 static void tsb_sync(void *info)
 {
+       struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
        struct mm_struct *mm = info;
 
-       if (current->active_mm == mm)
+       /* It is not valid to test "current->active_mm == mm" here.
+        *
+        * The value of "current" is not changed atomically with
+        * switch_mm().  But that's OK, we just need to check the
+        * current cpu's trap block PGD physical address.
+        */
+       if (tp->pgd_paddr == __pa(mm->pgd))
                tsb_context_switch(mm);
 }
 
@@ -803,6 +851,7 @@ extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_report_regs;
 extern unsigned long xcall_receive_signal;
+extern unsigned long xcall_new_mmu_context_version;
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -917,28 +966,55 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        put_cpu();
 }
 
+static void __smp_receive_signal_mask(cpumask_t mask)
+{
+       smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
+}
+
 void smp_receive_signal(int cpu)
 {
        cpumask_t mask = cpumask_of_cpu(cpu);
 
-       if (cpu_online(cpu)) {
-               u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
-
-               if (tlb_type == spitfire)
-                       spitfire_xcall_deliver(data0, 0, 0, mask);
-               else if (tlb_type == cheetah || tlb_type == cheetah_plus)
-                       cheetah_xcall_deliver(data0, 0, 0, mask);
-               else if (tlb_type == hypervisor)
-                       hypervisor_xcall_deliver(data0, 0, 0, mask);
-       }
+       if (cpu_online(cpu))
+               __smp_receive_signal_mask(mask);
 }
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
-       /* Just return, rtrap takes care of the rest. */
        clear_softint(1 << irq);
 }
 
+void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+{
+       struct mm_struct *mm;
+       unsigned long flags;
+
+       clear_softint(1 << irq);
+
+       /* See if we need to allocate a new TLB context because
+        * the version of the one we are using is now out of date.
+        */
+       mm = current->active_mm;
+       if (unlikely(!mm || (mm == &init_mm)))
+               return;
+
+       spin_lock_irqsave(&mm->context.lock, flags);
+
+       if (unlikely(!CTX_VALID(mm->context)))
+               get_new_mmu_context(mm);
+
+       spin_unlock_irqrestore(&mm->context.lock, flags);
+
+       load_secondary_context(mm);
+       __flush_tlb_mm(CTX_HWBITS(mm->context),
+                      SECONDARY_CONTEXT);
+}
+
+void smp_new_mmu_context_version(void)
+{
+       smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
+}
+
 void smp_report_regs(void)
 {
        smp_cross_call(&xcall_report_regs, 0, 0, 0);
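The smp_new_mmu_context_version_client() handler added above deals with MMU context-version rollover: when the global version advances, every cpu revalidates its active mm's context under mm->context.lock and reloads the hardware secondary context. A toy user-space model of that version-tag check; the global_version and struct ctx names are illustrative:

#include <stdio.h>

/* Toy model of context versioning: a context is valid only while
 * its version tag matches the global version, so one bump of the
 * global version invalidates every cpu's context at once. */
static unsigned long global_version = 1;

struct ctx { unsigned long version, hwbits; };

static int ctx_valid(const struct ctx *c)
{
        return c->version == global_version;
}

static void get_new_ctx(struct ctx *c, unsigned long hwbits)
{
        c->version = global_version;
        c->hwbits = hwbits;
}

int main(void)
{
        struct ctx mm_ctx = { 1, 42 };

        global_version++;               /* version rollover */
        if (!ctx_valid(&mm_ctx))        /* what each client cpu checks */
                get_new_ctx(&mm_ctx, 43);

        printf("hwbits=%lu version=%lu\n", mm_ctx.hwbits, mm_ctx.version);
        return 0;
}
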
@@ -1112,6 +1188,7 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
        unsigned long compare, tick, pstate;
        int cpu = smp_processor_id();
        int user = user_mode(regs);
+       struct pt_regs *old_regs;
 
        /*
         * Check for level 14 softint.
@@ -1128,8 +1205,9 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
                clear_softint(tick_mask);
        }
 
+       old_regs = set_irq_regs(regs);
        do {
-               profile_tick(CPU_PROFILING, regs);
+               profile_tick(CPU_PROFILING);
                if (!--prof_counter(cpu)) {
                        irq_enter();
 
@@ -1161,6 +1239,7 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
                                     : /* no outputs */
                                     : "r" (pstate));
        } while (time_after_eq(tick, compare));
+       set_irq_regs(old_regs);
 }
 
 static void __init smp_setup_percpu_timer(void)
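The timer hunk above follows the tree-wide irq_regs conversion: instead of threading struct pt_regs through profile_tick(), the handler publishes the pointer with set_irq_regs() and restores the previous value on exit, so nested interrupts keep working and callees can fetch it with get_irq_regs(). A schematic of the save/restore pairing; the handler name is hypothetical:

#include <asm/irq_regs.h>

void my_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /* Anything called from here, e.g. profile_tick(), can now
         * use get_irq_regs() instead of taking a regs argument. */

        set_irq_regs(old_regs);         /* restore for nesting */
}
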
@@ -1191,7 +1270,6 @@ void __init smp_tick_init(void)
        boot_cpu_id = hard_smp_processor_id();
        current_tick_offset = timer_tick_offset;
 
-       cpu_set(boot_cpu_id, cpu_online_map);
        prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
@@ -1207,7 +1285,7 @@ int setup_profiling_timer(unsigned int multiplier)
                return -EINVAL;
 
        spin_lock_irqsave(&prof_setup_lock, flags);
-       for (i = 0; i < NR_CPUS; i++)
+       for_each_possible_cpu(i)
                prof_multiplier(i) = multiplier;
        current_tick_offset = (timer_tick_offset / multiplier);
        spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1215,9 +1293,46 @@ int setup_profiling_timer(unsigned int multiplier)
        return 0;
 }
 
+static void __init smp_tune_scheduling(void)
+{
+       struct device_node *dp;
+       int instance;
+       unsigned int def, smallest = ~0U;
+
+       def = ((tlb_type == hypervisor) ?
+              (3 * 1024 * 1024) :
+              (4 * 1024 * 1024));
+
+       instance = 0;
+       while (!cpu_find_by_instance(instance, &dp, NULL)) {
+               unsigned int val;
+
+               val = of_getintprop_default(dp, "ecache-size", def);
+               if (val < smallest)
+                       smallest = val;
+
+               instance++;
+       }
+
+       /* Any value less than 256K is nonsense.  */
+       if (smallest < (256U * 1024U))
+               smallest = 256 * 1024;
+
+       max_cache_size = smallest;
+
+       if (smallest < 1U * 1024U * 1024U)
+               printk(KERN_INFO "Using max_cache_size of %uKB\n",
+                      smallest / 1024U);
+       else
+               printk(KERN_INFO "Using max_cache_size of %uMB\n",
+                      smallest / 1024U / 1024U);
+}
+
 /* Constrain the number of cpus to max_cpus.  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+       int i;
+
        if (num_possible_cpus() > max_cpus) {
                int instance, mid;
 
@@ -1225,6 +1340,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                while (!cpu_find_by_instance(instance, NULL, &mid)) {
                        if (mid != boot_cpu_id) {
                                cpu_clear(mid, phys_cpu_present_map);
+                               cpu_clear(mid, cpu_present_map);
                                if (num_possible_cpus() <= max_cpus)
                                        break;
                        }
@@ -1232,7 +1348,22 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                }
        }
 
+       for_each_possible_cpu(i) {
+               if (tlb_type == hypervisor) {
+                       int j;
+
+                       /* XXX get this mapping from machine description */
+                       for_each_possible_cpu(j) {
+                               if ((j >> 2) == (i >> 2))
+                                       cpu_set(j, cpu_sibling_map[i]);
+                       }
+               } else {
+                       cpu_set(i, cpu_sibling_map[i]);
+               }
+       }
+
        smp_store_cpu_info(boot_cpu_id);
+       smp_tune_scheduling();
 }
 
 /* Set this up early so that things like the scheduler can init
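The sibling-map loop above groups cpu ids four at a time via the (i >> 2) comparison, which matches the four hardware strands per core on Niagara-class sun4v chips; as the XXX notes, this mapping should eventually come from the machine description rather than being hard-coded. A quick check of the grouping rule:

#include <stdio.h>

int main(void)
{
        /* cpus i and j are siblings when (i >> 2) == (j >> 2):
         * ids 0-3 share a core, 4-7 the next, and so on. */
        for (int i = 0; i < 8; i++)
                printf("cpu %d -> core %d\n", i, i >> 2);
        return 0;
}
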
@@ -1245,26 +1376,16 @@ void __init smp_setup_cpu_possible_map(void)
 
        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
-               if (mid < NR_CPUS)
+               if (mid < NR_CPUS) {
                        cpu_set(mid, phys_cpu_present_map);
+                       cpu_set(mid, cpu_present_map);
+               }
                instance++;
        }
 }
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-       int cpu = hard_smp_processor_id();
-
-       if (cpu >= NR_CPUS) {
-               prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
-               prom_halt();
-       }
-
-       current_thread_info()->cpu = cpu;
-       __local_per_cpu_offset = __per_cpu_offset(cpu);
-
-       cpu_set(smp_processor_id(), cpu_online_map);
-       cpu_set(smp_processor_id(), phys_cpu_present_map);
 }
 
 int __devinit __cpu_up(unsigned int cpu)
@@ -1278,7 +1399,11 @@ int __devinit __cpu_up(unsigned int cpu)
                if (!cpu_isset(cpu, cpu_online_map)) {
                        ret = -ENODEV;
                } else {
-                       smp_synchronize_one_tick(cpu);
+                       /* On SUN4V, writes to %tick and %stick are
+                        * not allowed.
+                        */
+                       if (tlb_type != hypervisor)
+                               smp_synchronize_one_tick(cpu);
                }
        }
        return ret;
@@ -1289,10 +1414,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
        unsigned long bogosum = 0;
        int i;
 
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_online(i))
-                       bogosum += cpu_data(i).udelay_val;
-       }
+       for_each_online_cpu(i)
+               bogosum += cpu_data(i).udelay_val;
        printk("Total of %ld processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               (long) num_online_cpus(),
@@ -1339,4 +1462,7 @@ void __init setup_per_cpu_areas(void)
 
        for (i = 0; i < NR_CPUS; i++, ptr += size)
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+       /* Setup %g5 for the boot cpu.  */
+       __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
 }