include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[safe/jmp/linux-2.6] arch/x86/kernel/smpboot.c
index d720b7e..be40f82 100644
@@ -47,6 +47,9 @@
 #include <linux/bootmem.h>
 #include <linux/err.h>
 #include <linux/nmi.h>
+#include <linux/tboot.h>
+#include <linux/stackprotector.h>
+#include <linux/gfp.h>
 
 #include <asm/acpi.h>
 #include <asm/desc.h>
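Note: the three new includes serve three independent changes visible further
down: <linux/tboot.h> for the tboot_shutdown() call in native_play_dead(),
<linux/stackprotector.h> for boot_init_stack_canary() in start_secondary(),
and <linux/gfp.h> for the headline cleanup, since percpu.h will stop pulling
in slab.h (and with it gfp.h) implicitly. A minimal sketch of the failure
mode the explicit include avoids (illustrative, not part of the patch):

    /* Once the implicit include chain is gone, GFP_KERNEL is
     * undeclared unless each user names its own dependency. */
    #include <linux/gfp.h>   /* gfp_t, GFP_KERNEL */
    #include <linux/slab.h>  /* kmalloc() */

    static int *alloc_one_int(void)
    {
            return kmalloc(sizeof(int), GFP_KERNEL);
    }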
@@ -66,6 +69,7 @@
 #include <linux/mc146818rtc.h>
 
 #include <asm/smpboot_hooks.h>
+#include <asm/i8259.h>
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
@@ -240,6 +244,11 @@ static void __cpuinit smp_callin(void)
        map_cpu_to_logical_apicid();
 
        notify_cpu_starting(cpuid);
+
+       /*
+        * Need to setup vector mappings before we enable interrupts.
+        */
+       setup_vector_irq(smp_processor_id());
        /*
         * Get our bogomips.
         *
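Note: setup_vector_irq() now runs here in smp_callin(), before this CPU ever
executes local_irq_enable(); the matching removal of __setup_vector_irq()
from start_secondary() appears below. The invariant, as a rough model with
simplified names (not the kernel's exact code):

    /* Per-CPU vector -> irq translation: a vector arriving before its
     * entry exists can only be dropped as spurious, so the table must
     * be filled before interrupts are enabled on this CPU. */
    #define NR_VECTORS 256

    static int vector_irq[NR_VECTORS];      /* per-CPU in the kernel */

    static void handle_vector(int vector)
    {
            int irq = vector_irq[vector];

            if (irq <= 0)
                    return;                 /* no mapping: spurious */
            /* ... dispatch to the handler registered for irq ... */
    }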
@@ -285,9 +294,9 @@ notrace static void __cpuinit start_secondary(void *unused)
        check_tsc_sync_target();
 
        if (nmi_watchdog == NMI_IO_APIC) {
-               disable_8259A_irq(0);
+               legacy_pic->chip->mask(0);
                enable_NMI_through_LVT0();
-               enable_8259A_irq(0);
+               legacy_pic->chip->unmask(0);
        }
 
 #ifdef CONFIG_X86_32
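Note: disable_8259A_irq()/enable_8259A_irq() talked to the 8259A driver
directly; going through legacy_pic->chip keeps this path valid on platforms
with no physical PIC, which install a null legacy_pic whose ops do nothing.
Abridged shape of the abstraction, assuming the asm/i8259.h of this era:

    /* Abridged sketch of the legacy_pic abstraction: */
    struct legacy_pic {
            int nr_legacy_irqs;
            struct irq_chip *chip;  /* ->mask(irq), ->unmask(irq), ... */
            /* plus init, mask_all, restore_mask, irq_pending, ... */
    };

    extern struct legacy_pic *legacy_pic;
    /* default_legacy_pic wraps the 8259A; the null PIC is all no-ops */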
@@ -314,16 +323,19 @@ notrace static void __cpuinit start_secondary(void *unused)
         */
        ipi_call_lock();
        lock_vector_lock();
-       __setup_vector_irq(smp_processor_id());
        set_cpu_online(smp_processor_id(), true);
        unlock_vector_lock();
        ipi_call_unlock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+       x86_platform.nmi_init();
 
        /* enable local interrupts */
        local_irq_enable();
 
-       setup_secondary_clock();
+       /* to prevent fake stack check failure in clock setup */
+       boot_init_stack_canary();
+
+       x86_cpuinit.setup_percpu_clockev();
 
        wmb();
        cpu_idle();
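Note: three separable changes meet in this hunk. __setup_vector_irq() is gone
because smp_callin() handles it earlier (see above). x86_platform.nmi_init()
gives a platform (UV) a per-bringup NMI hook. boot_init_stack_canary() seeds
this CPU's stack-protector canary before any -fstack-protector function runs
during clock setup, hence the "fake stack check failure" comment. Finally,
setup_secondary_clock() becomes a call through an ops table, the pattern that
replaced weak functions and #ifdefs for platform hooks; its BSP-side twin,
x86_init.timers.setup_percpu_clockev, appears in native_smp_prepare_cpus()
below. Pattern sketch, assuming the x86_init.h of this era:

    /* Boot-time platform hooks live in ops structs that paravirt or
     * platform code overrides, instead of patching each call site: */
    struct x86_cpuinit_ops {
            void (*setup_percpu_clockev)(void); /* per-AP clockevent */
    };

    extern struct x86_cpuinit_ops x86_cpuinit;
    /* native default: setup_secondary_APIC_clock(); Xen installs its own */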
@@ -434,7 +446,8 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
         * For perf, we return last level cache shared map.
         * And for power savings, we return cpu_core_map
         */
-       if (sched_mc_power_savings || sched_smt_power_savings)
+       if ((sched_mc_power_savings || sched_smt_power_savings) &&
+           !(cpu_has(c, X86_FEATURE_AMD_DCM)))
                return cpu_core_mask(cpu);
        else
                return c->llc_shared_map;
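Note: X86_FEATURE_AMD_DCM marks AMD multi-node packages (Magny-Cours: two
dies with separate memory controllers in one socket). There cpu_core_mask()
spans both dies, so using it for power-savings scheduling would build domains
that cross NUMA nodes; the last-level-cache mask stays on one die. The
decision, as a stand-alone model with hypothetical names:

    enum which_mask { CORE_MAP, LLC_MAP };

    static enum which_mask coregroup_mask_model(int power_savings_on,
                                                int amd_dcm)
    {
            /* Power-saving placement may widen the domain to the whole
             * package, but never across the dies of a DCM part. */
            if (power_savings_on && !amd_dcm)
                    return CORE_MAP;
            return LLC_MAP;
    }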
@@ -669,6 +682,26 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
        complete(&c_idle->done);
 }
 
+/* reduce the number of lines printed when booting a large cpu count system */
+static void __cpuinit announce_cpu(int cpu, int apicid)
+{
+       static int current_node = -1;
+       int node = cpu_to_node(cpu);
+
+       if (system_state == SYSTEM_BOOTING) {
+               if (node != current_node) {
+                       if (current_node > (-1))
+                               pr_cont(" Ok.\n");
+                       current_node = node;
+                       pr_info("Booting Node %3d, Processors ", node);
+               }
+               pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
+               return;
+       } else
+               pr_info("Booting Node %d Processor %d APIC 0x%x\n",
+                       node, cpu, apicid);
+}
+
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
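Note: announce_cpu() condenses bootup output to one line per node while
system_state == SYSTEM_BOOTING, and keeps a full line per CPU for hotplug
afterwards; its call site replaces the old per-CPU printk in do_boot_cpu()
below. A self-contained userspace model of the printing logic (fake topology,
plain printf, builds as C99):

    #include <stdio.h>

    #define NR_CPUS 8

    static int cpu_to_node(int cpu) { return cpu / 4; } /* fake topology */

    int main(void)
    {
            int current_node = -1;

            for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                    int node = cpu_to_node(cpu);

                    if (node != current_node) {
                            if (current_node > -1)
                                    printf(" Ok.\n");
                            current_node = node;
                            printf("Booting Node %3d, Processors ", node);
                    }
                    printf(" #%d%s", cpu,
                           cpu == NR_CPUS - 1 ? " Ok.\n" : "");
            }
            return 0;   /* prints two lines instead of eight */
    }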
@@ -685,7 +718,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
                .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
 
-       INIT_WORK(&c_idle.work, do_fork_idle);
+       INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
 
        alternatives_smp_switch(1);
 
@@ -711,6 +744,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
        if (IS_ERR(c_idle.idle)) {
                printk("failed fork for CPU %d\n", cpu);
+               destroy_work_on_stack(&c_idle.work);
                return PTR_ERR(c_idle.idle);
        }
 
@@ -734,9 +768,8 @@ do_rest:
        /* start_ip had better be page-aligned! */
        start_ip = setup_trampoline();
 
-       /* So we see what's up   */
-       printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
-                         cpu, apicid, start_ip);
+       /* So we see what's up */
+       announce_cpu(cpu, apicid);
 
        /*
         * This grunge runs the startup process for
@@ -785,21 +818,17 @@ do_rest:
                        udelay(100);
                }
 
-               if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
-                       /* number CPUs logically, starting from 1 (BSP is 0) */
-                       pr_debug("OK.\n");
-                       printk(KERN_INFO "CPU%d: ", cpu);
-                       print_cpu_info(&cpu_data(cpu));
-                       pr_debug("CPU has booted.\n");
-               } else {
+               if (cpumask_test_cpu(cpu, cpu_callin_mask))
+                       pr_debug("CPU%d: has booted.\n", cpu);
+               else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)trampoline_base)
                                        == 0xA5)
                                /* trampoline started but...? */
-                               printk(KERN_ERR "Stuck ??\n");
+                               pr_err("CPU%d: Stuck ??\n", cpu);
                        else
                                /* trampoline code not run */
-                               printk(KERN_ERR "Not responding.\n");
+                               pr_err("CPU%d: Not responding.\n", cpu);
                        if (apic->inquire_remote_apic)
                                apic->inquire_remote_apic(apicid);
                }
@@ -829,6 +858,7 @@ do_rest:
                smpboot_restore_warm_reset_vector();
        }
 
+       destroy_work_on_stack(&c_idle.work);
        return boot_error;
 }
 
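Note: this destroy_work_on_stack() pairs with the INIT_WORK_ON_STACK() and
the error-path destroy added in do_boot_cpu() above. Under
CONFIG_DEBUG_OBJECTS_WORK an on-stack work item is registered with the
debug-object tracker at init time and must be unregistered on every exit
path before its stack frame dies, the early return after a failed fork
included. Canonical shape of the pattern (handler is a placeholder):

    static void handler(struct work_struct *work) { /* placeholder */ }

    static void use_on_stack_work(void)
    {
            struct work_struct work;

            INIT_WORK_ON_STACK(&work, handler); /* registers debug object */
            schedule_work(&work);
            flush_work(&work);          /* must finish before frame dies */
            destroy_work_on_stack(&work);       /* unregister, every path */
    }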
@@ -1057,19 +1087,14 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 #endif
        current_thread_info()->cpu = 0;  /* needed? */
        for_each_possible_cpu(i) {
-               alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-               alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-               alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-               cpumask_clear(per_cpu(cpu_core_map, i));
-               cpumask_clear(per_cpu(cpu_sibling_map, i));
-               cpumask_clear(cpu_data(i).llc_shared_map);
+               zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
        }
        set_cpu_sibling_map(0);
 
        enable_IR_x2apic();
-#ifdef CONFIG_X86_64
        default_setup_apic_routing();
-#endif
 
        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
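Note: two cleanups meet here. zalloc_cpumask_var() is alloc_cpumask_var()
with __GFP_ZERO, so the three explicit cpumask_clear() calls fold into the
allocations. And default_setup_apic_routing() loses its CONFIG_X86_64 guard
because 32-bit gained an implementation of its own, making the call common
to both builds. Equivalence sketch for the allocator change:

    /* Per <linux/cpumask.h>, this: */
    zalloc_cpumask_var(&mask, GFP_KERNEL);
    /* behaves like:
     *   alloc_cpumask_var(&mask, GFP_KERNEL | __GFP_ZERO);
     * (with CONFIG_CPUMASK_OFFSTACK=n there is no allocation at all and
     * the zalloc variant simply clears the fixed-size mask) */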
@@ -1112,7 +1137,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
        printk(KERN_INFO "CPU%d: ", 0);
        print_cpu_info(&cpu_data(0));
-       setup_boot_clock();
+       x86_init.timers.setup_percpu_clockev();
 
        if (is_uv_system())
                uv_system_init();
@@ -1197,11 +1222,12 @@ __init void prefill_possible_map(void)
 
        total_cpus = max_t(int, possible, num_processors + disabled_cpus);
 
-       if (possible > CONFIG_NR_CPUS) {
+       /* nr_cpu_ids could be reduced via nr_cpus= */
+       if (possible > nr_cpu_ids) {
                printk(KERN_WARNING
                        "%d Processors exceeds NR_CPUS limit of %d\n",
-                       possible, CONFIG_NR_CPUS);
-               possible = CONFIG_NR_CPUS;
+                       possible, nr_cpu_ids);
+               possible = nr_cpu_ids;
        }
 
        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
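Note: nr_cpu_ids starts out as CONFIG_NR_CPUS but can be lowered at boot by
the nr_cpus= parameter, so the runtime value is the correct clamp for
"possible". Self-contained model of the adjusted check:

    #include <stdio.h>

    int main(void)
    {
            int nr_cpu_ids = 4;  /* CONFIG_NR_CPUS=64, booted nr_cpus=4 */
            int possible   = 16; /* CPUs described by firmware tables */

            if (possible > nr_cpu_ids) {
                    printf("%d Processors exceeds NR_CPUS limit of %d\n",
                           possible, nr_cpu_ids);
                    possible = nr_cpu_ids;
            }
            printf("SMP: Allowing %d CPUs\n", possible);
            return 0;
    }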
@@ -1251,16 +1277,7 @@ static void __ref remove_cpu_from_maps(int cpu)
 void cpu_disable_common(void)
 {
        int cpu = smp_processor_id();
-       /*
-        * HACK:
-        * Allow any queued timer interrupts to get serviced
-        * This is only a temporary solution until we cleanup
-        * fixup_irqs as we do for IA64.
-        */
-       local_irq_enable();
-       mdelay(1);
 
-       local_irq_disable();
        remove_siblinginfo(cpu);
 
        /* It's now safe to remove this processor from the online map */
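Note: the deleted block briefly re-enabled interrupts inside the offline path
purely to drain queued timer interrupts, and described itself as a temporary
hack. The companion rework of fixup_irqs() on the cpu-disable path, which
retargets and retriggers anything still pending, makes that drain
unnecessary, so cpu_disable_common() now runs with interrupts off throughout.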
@@ -1301,14 +1318,16 @@ void native_cpu_die(unsigned int cpu)
        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-                       printk(KERN_INFO "CPU %d is now offline\n", cpu);
+                       if (system_state == SYSTEM_RUNNING)
+                               pr_info("CPU %u is now offline\n", cpu);
+
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
-       printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+       pr_err("CPU %u didn't die...\n", cpu);
 }
 
 void play_dead_common(void)
@@ -1331,6 +1350,7 @@ void play_dead_common(void)
 void native_play_dead(void)
 {
        play_dead_common();
+       tboot_shutdown(TB_SHUTDOWN_WFS);
        wbinvd_halt();
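Note: on a system measured-launched under Intel TXT, an offlined AP must be
handed back to the tboot stub and parked in wait-for-SIPI state
(TB_SHUTDOWN_WFS) so the measured environment keeps control of it; a plain
halt loop is not acceptable to TXT. The call is safe unconditionally.
Abridged guard, assuming the tboot.c of this era:

    /* tboot_shutdown() is a no-op unless TXT is active: */
    void tboot_shutdown(u32 shutdown_type)
    {
            if (!tboot_enabled())
                    return;
            /* record shutdown_type for the tboot stub and jump to it */
    }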
 }