Merge branch 'x86/irq' into x86/apic
[safe/jmp/linux-2.6] / arch / x86 / kernel / smpboot.c
index a25eeec..da99eef 100644 (file)
@@ -241,6 +241,11 @@ static void __cpuinit smp_callin(void)
        map_cpu_to_logical_apicid();
 
        notify_cpu_starting(cpuid);
+
+       /*
+        * Need to set up vector mappings before we enable interrupts.
+        */
+       __setup_vector_irq(smp_processor_id());
        /*
         * Get our bogomips.
         *
@@ -315,7 +320,6 @@ notrace static void __cpuinit start_secondary(void *unused)
         */
        ipi_call_lock();
        lock_vector_lock();
-       __setup_vector_irq(smp_processor_id());
        set_cpu_online(smp_processor_id(), true);
        unlock_vector_lock();
        ipi_call_unlock();
@@ -324,7 +328,7 @@ notrace static void __cpuinit start_secondary(void *unused)
        /* enable local interrupts */
        local_irq_enable();
 
-       setup_secondary_clock();
+       x86_cpuinit.setup_percpu_clockev();
 
        wmb();
        cpu_idle();
@@ -671,6 +675,26 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
        complete(&c_idle->done);
 }
 
+/* Reduce the number of boot-time console lines printed on large CPU-count systems */
+static void __cpuinit announce_cpu(int cpu, int apicid)
+{
+       static int current_node = -1;
+       int node = cpu_to_node(cpu);
+
+       if (system_state == SYSTEM_BOOTING) {
+               if (node != current_node) {
+                       if (current_node > (-1))
+                               pr_cont(" Ok.\n");
+                       current_node = node;
+                       pr_info("Booting Node %3d, Processors ", node);
+               }
+               pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
+               return;
+       } else
+               pr_info("Booting Node %d Processor %d APIC 0x%x\n",
+                       node, cpu, apicid);
+}
+
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -687,7 +711,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
                .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
 
-       INIT_WORK(&c_idle.work, do_fork_idle);
+       INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
 
        alternatives_smp_switch(1);
 
@@ -713,6 +737,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
        if (IS_ERR(c_idle.idle)) {
                printk("failed fork for CPU %d\n", cpu);
+               destroy_work_on_stack(&c_idle.work);
                return PTR_ERR(c_idle.idle);
        }
 
@@ -736,9 +761,8 @@ do_rest:
        /* start_ip had better be page-aligned! */
        start_ip = setup_trampoline();
 
-       /* So we see what's up   */
-       printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
-                         cpu, apicid, start_ip);
+       /* So we see what's up */
+       announce_cpu(cpu, apicid);
 
        /*
         * This grunge runs the startup process for
@@ -787,21 +811,17 @@ do_rest:
                        udelay(100);
                }
 
-               if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
-                       /* number CPUs logically, starting from 1 (BSP is 0) */
-                       pr_debug("OK.\n");
-                       printk(KERN_INFO "CPU%d: ", cpu);
-                       print_cpu_info(&cpu_data(cpu));
-                       pr_debug("CPU has booted.\n");
-               } else {
+               if (cpumask_test_cpu(cpu, cpu_callin_mask))
+                       pr_debug("CPU%d: has booted.\n", cpu);
+               else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)trampoline_base)
                                        == 0xA5)
                                /* trampoline started but...? */
-                               printk(KERN_ERR "Stuck ??\n");
+                               pr_err("CPU%d: Stuck ??\n", cpu);
                        else
                                /* trampoline code not run */
-                               printk(KERN_ERR "Not responding.\n");
+                               pr_err("CPU%d: Not responding.\n", cpu);
                        if (apic->inquire_remote_apic)
                                apic->inquire_remote_apic(apicid);
                }
@@ -831,6 +851,7 @@ do_rest:
                smpboot_restore_warm_reset_vector();
        }
 
+       destroy_work_on_stack(&c_idle.work);
        return boot_error;
 }
 
@@ -1059,19 +1080,14 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 #endif
        current_thread_info()->cpu = 0;  /* needed? */
        for_each_possible_cpu(i) {
-               alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-               alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-               alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-               cpumask_clear(per_cpu(cpu_core_map, i));
-               cpumask_clear(per_cpu(cpu_sibling_map, i));
-               cpumask_clear(cpu_data(i).llc_shared_map);
+               zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
        }
        set_cpu_sibling_map(0);
 
        enable_IR_x2apic();
-#ifdef CONFIG_X86_64
        default_setup_apic_routing();
-#endif
 
        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
@@ -1114,7 +1130,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
        printk(KERN_INFO "CPU%d: ", 0);
        print_cpu_info(&cpu_data(0));
-       setup_boot_clock();
+       x86_init.timers.setup_percpu_clockev();
 
        if (is_uv_system())
                uv_system_init();
@@ -1199,11 +1215,12 @@ __init void prefill_possible_map(void)
 
        total_cpus = max_t(int, possible, num_processors + disabled_cpus);
 
-       if (possible > CONFIG_NR_CPUS) {
+       /* nr_cpu_ids could be reduced via nr_cpus= */
+       if (possible > nr_cpu_ids) {
                printk(KERN_WARNING
                        "%d Processors exceeds NR_CPUS limit of %d\n",
-                       possible, CONFIG_NR_CPUS);
-               possible = CONFIG_NR_CPUS;
+                       possible, nr_cpu_ids);
+               possible = nr_cpu_ids;
        }
 
        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
@@ -1253,16 +1270,7 @@ static void __ref remove_cpu_from_maps(int cpu)
 void cpu_disable_common(void)
 {
        int cpu = smp_processor_id();
-       /*
-        * HACK:
-        * Allow any queued timer interrupts to get serviced
-        * This is only a temporary solution until we cleanup
-        * fixup_irqs as we do for IA64.
-        */
-       local_irq_enable();
-       mdelay(1);
 
-       local_irq_disable();
        remove_siblinginfo(cpu);
 
        /* It's now safe to remove this processor from the online map */
@@ -1303,14 +1311,16 @@ void native_cpu_die(unsigned int cpu)
        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-                       printk(KERN_INFO "CPU %d is now offline\n", cpu);
+                       if (system_state == SYSTEM_RUNNING)
+                               pr_info("CPU %u is now offline\n", cpu);
+
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
-       printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+       pr_err("CPU %u didn't die...\n", cpu);
 }
 
 void play_dead_common(void)