include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 3fed177..be40f82 100644
@@ -2,7 +2,7 @@
  *     x86 SMP booting functions
  *
  *     (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- *     (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *     (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *     Copyright 2001 Andi Kleen, SuSE Labs.
  *
  *     Much of the core SMP work is based on previous work by Thomas Radke, to
@@ -47,6 +47,9 @@
 #include <linux/bootmem.h>
 #include <linux/err.h>
 #include <linux/nmi.h>
+#include <linux/tboot.h>
+#include <linux/stackprotector.h>
+#include <linux/gfp.h>
 
 #include <asm/acpi.h>
 #include <asm/desc.h>
 #include <asm/tlbflush.h>
 #include <asm/mtrr.h>
 #include <asm/vmi.h>
-#include <asm/genapic.h>
+#include <asm/apic.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
 
-#include <mach_apic.h>
-#include <smpboot_hooks.h>
+#include <asm/smpboot_hooks.h>
+#include <asm/i8259.h>
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
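
The new #include <linux/gfp.h> above is what the headline cleanup is about: once percpu.h stops pulling in slab.h (and, indirectly, gfp.h), any file that uses allocation flags must include gfp.h itself. A minimal sketch of the dependency this file now states explicitly (illustrative, not part of the patch):

    #include <linux/gfp.h>      /* GFP_KERNEL */
    #include <linux/cpumask.h>  /* cpumask_var_t, zalloc_cpumask_var() */

    static int __init gfp_dep_example(void)   /* hypothetical function */
    {
            cpumask_var_t mask;

            /* GFP_KERNEL resolves only because gfp.h is included above */
            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;
            free_cpumask_var(mask);
            return 0;
    }
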
@@ -102,29 +105,20 @@ EXPORT_SYMBOL(smp_num_siblings);
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
 /* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-static atomic_t init_deasserted;
-
-
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
+atomic_t init_deasserted;
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
-
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
-                               { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
-EXPORT_SYMBOL(node_to_cpumask_map);
 /* which node each logical CPU is on */
 int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
 EXPORT_SYMBOL(cpu_to_node_map);
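
Converting cpu_sibling_map and cpu_core_map from cpumask_t to cpumask_var_t changes their storage: with CONFIG_CPUMASK_OFFSTACK=y each per-CPU entry is just a pointer that must be allocated before first use, while with it off the type degrades to a one-element array and the allocator calls become no-ops that always succeed. This is why native_smp_prepare_cpus() gains a zalloc loop later in this patch. A hedged sketch of the pattern (names are illustrative):

    DEFINE_PER_CPU(cpumask_var_t, example_mask);

    static void __init example_alloc_masks(void)
    {
            unsigned int cpu;

            for_each_possible_cpu(cpu) {
                    /* zeroed allocation; compiles away to "succeeds"
                     * when off-stack cpumasks are disabled */
                    zalloc_cpumask_var(&per_cpu(example_mask, cpu),
                                       GFP_KERNEL);
                    /* users now pass the mask itself, not &mask */
                    cpumask_set_cpu(cpu, per_cpu(example_mask, cpu));
            }
    }
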
@@ -133,7 +127,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
 static void map_cpu_to_node(int cpu, int node)
 {
        printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-       cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
+       cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = node;
 }
 
@@ -144,7 +138,7 @@ static void unmap_cpu_to_node(int cpu)
 
        printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
        for (node = 0; node < MAX_NUMNODES; node++)
-               cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
+               cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = 0;
 }
 #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
@@ -250,6 +244,11 @@ static void __cpuinit smp_callin(void)
        map_cpu_to_logical_apicid();
 
        notify_cpu_starting(cpuid);
+
+       /*
+        * Need to setup vector mappings before we enable interrupts.
+        */
+       setup_vector_irq(smp_processor_id());
        /*
         * Get our bogomips.
         *
@@ -272,8 +271,6 @@ static void __cpuinit smp_callin(void)
        cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
-static int __cpuinitdata unsafe_smp;
-
 /*
  * Activate a secondary processor.
  */
@@ -297,9 +294,9 @@ notrace static void __cpuinit start_secondary(void *unused)
        check_tsc_sync_target();
 
        if (nmi_watchdog == NMI_IO_APIC) {
-               disable_8259A_irq(0);
+               legacy_pic->chip->mask(0);
                enable_NMI_through_LVT0();
-               enable_8259A_irq(0);
+               legacy_pic->chip->unmask(0);
        }
 
 #ifdef CONFIG_X86_32
@@ -308,7 +305,7 @@ notrace static void __cpuinit start_secondary(void *unused)
        __flush_tlb_all();
 #endif
 
-       /* This must be done before setting cpu_online_map */
+       /* This must be done before setting cpu_online_mask */
        set_cpu_sibling_map(raw_smp_processor_id());
        wmb();
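
The NMI-watchdog setup above no longer calls the i8259 helpers directly; masking goes through the legacy_pic operations (hence the new asm/i8259.h include), so platforms without a legacy PIC can plug in no-op handlers instead. A rough sketch of the shape of that indirection (field layout is illustrative; the real definition lives in asm/i8259.h):

    struct legacy_pic_like {                    /* hypothetical mirror */
            int nr_legacy_irqs;
            struct irq_chip *chip;              /* ->mask()/->unmask() */
    };

    /* so the old:      disable_8259A_irq(0);
     * becomes:         legacy_pic->chip->mask(0);
     * and a PIC-less platform supplies a chip whose ops do nothing. */
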
 
@@ -326,90 +323,40 @@ notrace static void __cpuinit start_secondary(void *unused)
         */
        ipi_call_lock();
        lock_vector_lock();
-       __setup_vector_irq(smp_processor_id());
        set_cpu_online(smp_processor_id(), true);
        unlock_vector_lock();
        ipi_call_unlock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+       x86_platform.nmi_init();
 
        /* enable local interrupts */
        local_irq_enable();
 
-       setup_secondary_clock();
+       /* to prevent fake stack check failure in clock setup */
+       boot_init_stack_canary();
+
+       x86_cpuinit.setup_percpu_clockev();
 
        wmb();
        cpu_idle();
 }
 
-static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* In this case, llc_shared_map is a pointer to a cpumask. */
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+                                   const struct cpuinfo_x86 *src)
 {
-       /*
-        * Mask B, Pentium, but not Pentium MMX
-        */
-       if (c->x86_vendor == X86_VENDOR_INTEL &&
-           c->x86 == 5 &&
-           c->x86_mask >= 1 && c->x86_mask <= 4 &&
-           c->x86_model <= 3)
-               /*
-                * Remember we have B step Pentia with bugs
-                */
-               smp_b_stepping = 1;
-
-       /*
-        * Certain Athlons might work (for various values of 'work') in SMP
-        * but they are not certified as MP capable.
-        */
-       if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
-               if (num_possible_cpus() == 1)
-                       goto valid_k7;
-
-               /* Athlon 660/661 is valid. */
-               if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-                   (c->x86_mask == 1)))
-                       goto valid_k7;
-
-               /* Duron 670 is valid */
-               if ((c->x86_model == 7) && (c->x86_mask == 0))
-                       goto valid_k7;
-
-               /*
-                * Athlon 662, Duron 671, and Athlon >model 7 have capability
-                * bit. It's worth noting that the A5 stepping (662) of some
-                * Athlon XP's have the MP bit set.
-                * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
-                * more.
-                */
-               if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-                   ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
-                    (c->x86_model > 7))
-                       if (cpu_has_mp)
-                               goto valid_k7;
-
-               /* If we get here, not a certified SMP capable AMD system. */
-               unsafe_smp = 1;
-       }
-
-valid_k7:
-       ;
+       struct cpumask *llc = dst->llc_shared_map;
+       *dst = *src;
+       dst->llc_shared_map = llc;
 }
-
-static void __cpuinit smp_checks(void)
+#else
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+                                   const struct cpuinfo_x86 *src)
 {
-       if (smp_b_stepping)
-               printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
-                                   "with B stepping processors.\n");
-
-       /*
-        * Don't taint if we are running SMP kernel on a single non-MP
-        * approved Athlon
-        */
-       if (unsafe_smp && num_online_cpus() > 1) {
-               printk(KERN_INFO "WARNING: This combination of AMD"
-                       "processors is not suitable for SMP.\n");
-               add_taint(TAINT_UNSAFE_SMP);
-       }
+       *dst = *src;
 }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
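
The OFFSTACK variant of copy_cpuinfo_x86() above exists because llc_shared_map is now a pointer inside struct cpuinfo_x86: a plain structure assignment would overwrite the destination's pointer with the source's, leaking the destination's allocation and leaving two CPUs sharing one mask. Minimal illustration of the save/restore idiom (hypothetical reduced type):

    struct info { int family; struct cpumask *llc_shared_map; };

    static void copy_info(struct info *dst, const struct info *src)
    {
            struct cpumask *llc = dst->llc_shared_map; /* keep dst's mask */
            *dst = *src;                               /* bulk-copy the rest */
            dst->llc_shared_map = llc;                 /* restore pointer */
    }
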
 
 /*
  * The bootstrap kernel entry code has set these up. Save them for
@@ -420,11 +367,10 @@ void __cpuinit smp_store_cpu_info(int id)
 {
        struct cpuinfo_x86 *c = &cpu_data(id);
 
-       *c = boot_cpu_data;
+       copy_cpuinfo_x86(c, &boot_cpu_data);
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
-       smp_apply_quirks(c);
 }
 
 
@@ -445,15 +391,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                                cpumask_set_cpu(cpu, cpu_sibling_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
-                               cpumask_set_cpu(i, &c->llc_shared_map);
-                               cpumask_set_cpu(cpu, &o->llc_shared_map);
+                               cpumask_set_cpu(i, c->llc_shared_map);
+                               cpumask_set_cpu(cpu, o->llc_shared_map);
                        }
                }
        } else {
                cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
        }
 
-       cpumask_set_cpu(cpu, &c->llc_shared_map);
+       cpumask_set_cpu(cpu, c->llc_shared_map);
 
        if (current_cpu_data.x86_max_cores == 1) {
                cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -464,8 +410,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
        for_each_cpu(i, cpu_sibling_setup_mask) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                       cpumask_set_cpu(i, &c->llc_shared_map);
-                       cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
+                       cpumask_set_cpu(i, c->llc_shared_map);
+                       cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -500,15 +446,11 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
         * For perf, we return last level cache shared map.
         * And for power savings, we return cpu_core_map
         */
-       if (sched_mc_power_savings || sched_smt_power_savings)
+       if ((sched_mc_power_savings || sched_smt_power_savings) &&
+           !(cpu_has(c, X86_FEATURE_AMD_DCM)))
                return cpu_core_mask(cpu);
        else
-               return &c->llc_shared_map;
-}
-
-cpumask_t cpu_coregroup_map(int cpu)
-{
-       return *cpu_coregroup_mask(cpu);
+               return c->llc_shared_map;
 }
 
 static void impress_friends(void)
@@ -575,7 +517,7 @@ void __inquire_remote_apic(int apicid)
  * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
  * won't ... remember to clear down the APIC, etc later.
  */
-int __devinit
+int __cpuinit
 wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
 {
        unsigned long send_status, accept_status = 0;
@@ -609,18 +551,12 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
        return (send_status | accept_status);
 }
 
-int __devinit
+static int __cpuinit
 wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 {
        unsigned long send_status, accept_status = 0;
        int maxlvt, num_starts, j;
 
-       if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
-               send_status = uv_wakeup_secondary(phys_apicid, start_eip);
-               atomic_set(&init_deasserted, 1);
-               return send_status;
-       }
-
        maxlvt = lapic_get_maxlvt();
 
        /*
@@ -746,22 +682,43 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
        complete(&c_idle->done);
 }
 
-static int __cpuinit do_boot_cpu(int apicid, int cpu)
+/* reduce the number of lines printed when booting a large cpu count system */
+static void __cpuinit announce_cpu(int cpu, int apicid)
+{
+       static int current_node = -1;
+       int node = cpu_to_node(cpu);
+
+       if (system_state == SYSTEM_BOOTING) {
+               if (node != current_node) {
+                       if (current_node > (-1))
+                               pr_cont(" Ok.\n");
+                       current_node = node;
+                       pr_info("Booting Node %3d, Processors ", node);
+               }
+               pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
+               return;
+       } else
+               pr_info("Booting Node %d Processor %d APIC 0x%x\n",
+                       node, cpu, apicid);
+}
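
Judging purely from the format strings above, the condensed output during boot groups CPUs by node, roughly like this (illustrative, not captured from a real boot):

    Booting Node   0, Processors  #1 #2 #3 Ok.
    Booting Node   1, Processors  #4 #5 #6 #7 Ok.

while post-boot hotplug falls back to one full line per CPU:

    Booting Node 0 Processor 2 APIC 0x4
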
+
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
- * Returns zero if CPU booted OK, else error code from ->wakeup_cpu.
+ * Returns zero if CPU booted OK, else error code from
+ * ->wakeup_secondary_cpu.
  */
+static int __cpuinit do_boot_cpu(int apicid, int cpu)
 {
        unsigned long boot_error = 0;
-       int timeout;
        unsigned long start_ip;
-       unsigned short nmi_high = 0, nmi_low = 0;
+       int timeout;
        struct create_idle c_idle = {
-               .cpu = cpu,
-               .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+               .cpu    = cpu,
+               .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
-       INIT_WORK(&c_idle.work, do_fork_idle);
+
+       INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
 
        alternatives_smp_switch(1);
 
@@ -787,6 +744,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
        if (IS_ERR(c_idle.idle)) {
                printk("failed fork for CPU %d\n", cpu);
+               destroy_work_on_stack(&c_idle.work);
                return PTR_ERR(c_idle.idle);
        }
 
@@ -810,9 +768,8 @@ do_rest:
        /* start_ip had better be page-aligned! */
        start_ip = setup_trampoline();
 
-       /* So we see what's up   */
-       printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
-                         cpu, apicid, start_ip);
+       /* So we see what's up */
+       announce_cpu(cpu, apicid);
 
        /*
         * This grunge runs the startup process for
@@ -825,9 +782,6 @@ do_rest:
 
                pr_debug("Setting warm reset code and vector.\n");
 
-               if (apic->store_NMI_vector)
-                       apic->store_NMI_vector(&nmi_high, &nmi_low);
-
                smpboot_setup_warm_reset_vector(start_ip);
                /*
                 * Be paranoid about clearing APIC errors.
@@ -839,9 +793,13 @@ do_rest:
        }
 
        /*
-        * Starting actual IPI sequence...
+        * Kick the secondary CPU. Use the method in the APIC driver
+        * if it's defined - or use an INIT boot APIC message otherwise:
         */
-       boot_error = apic->wakeup_cpu(apicid, start_ip);
+       if (apic->wakeup_secondary_cpu)
+               boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
+       else
+               boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
 
        if (!boot_error) {
                /*
@@ -860,21 +818,17 @@ do_rest:
                        udelay(100);
                }
 
-               if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
-                       /* number CPUs logically, starting from 1 (BSP is 0) */
-                       pr_debug("OK.\n");
-                       printk(KERN_INFO "CPU%d: ", cpu);
-                       print_cpu_info(&cpu_data(cpu));
-                       pr_debug("CPU has booted.\n");
-               } else {
+               if (cpumask_test_cpu(cpu, cpu_callin_mask))
+                       pr_debug("CPU%d: has booted.\n", cpu);
+               else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)trampoline_base)
                                        == 0xA5)
                                /* trampoline started but...? */
-                               printk(KERN_ERR "Stuck ??\n");
+                               pr_err("CPU%d: Stuck ??\n", cpu);
                        else
                                /* trampoline code not run */
-                               printk(KERN_ERR "Not responding.\n");
+                               pr_err("CPU%d: Not responding.\n", cpu);
                        if (apic->inquire_remote_apic)
                                apic->inquire_remote_apic(apicid);
                }
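
The reworked error path keys off a marker byte: the real-mode trampoline stub stamps trampoline_base as one of its first actions, so the boot CPU can tell an AP that started and then hung from one that never ran the trampoline at all. A sketch of that convention (hypothetical helper, not in the patch):

    static const char *ap_failure_mode(void)
    {
            /* 0xA5 present: the AP reached the trampoline, then stalled;
             * absent: the startup IPI never got it running at all. */
            return (*(volatile unsigned char *)trampoline_base == 0xA5)
                    ? "Stuck ??" : "Not responding.";
    }
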
@@ -897,26 +851,17 @@ do_rest:
        /* mark "stuck" area as not stuck */
        *((volatile unsigned long *)trampoline_base) = 0;
 
-       /*
-        * Cleanup possible dangling ends...
-        */
-       smpboot_restore_warm_reset_vector();
+       if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
+               /*
+                * Cleanup possible dangling ends...
+                */
+               smpboot_restore_warm_reset_vector();
+       }
 
+       destroy_work_on_stack(&c_idle.work);
        return boot_error;
 }
 
-#ifdef CONFIG_X86_64
-int default_cpu_present_to_apicid(int mps_cpu)
-{
-       return __default_cpu_present_to_apicid(mps_cpu);
-}
-
-int default_check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-       return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
-}
-#endif
-
 int __cpuinit native_cpu_up(unsigned int cpu)
 {
        int apicid = apic->cpu_present_to_apicid(cpu);
@@ -958,7 +903,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 
        err = do_boot_cpu(apicid, cpu);
 
-       zap_low_mappings();
+       zap_low_mappings(false);
        low_mappings = 0;
 #else
        err = do_boot_cpu(apicid, cpu);
@@ -991,9 +936,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
  */
 static __init void disable_smp(void)
 {
-       /* use the read/write pointers to the present and possible maps */
-       cpumask_copy(&cpu_present_map, cpumask_of(0));
-       cpumask_copy(&cpu_possible_map, cpumask_of(0));
+       init_cpu_present(cpumask_of(0));
+       init_cpu_possible(cpumask_of(0));
        smpboot_clear_io_apic_irqs();
 
        if (smp_found_config)
@@ -1012,14 +956,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
 {
        preempt_disable();
 
-#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
+#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
        if (def_to_bigsmp && nr_cpu_ids > 8) {
                unsigned int cpu;
                unsigned nr;
 
                printk(KERN_WARNING
                       "More than 8 CPUs detected - skipping them.\n"
-                      "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+                      "Use CONFIG_X86_BIGSMP.\n");
 
                nr = 0;
                for_each_present_cpu(cpu) {
@@ -1078,12 +1022,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
         */
        if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
            !cpu_has_apic) {
-               printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
-                       boot_cpu_physical_apicid);
-               printk(KERN_ERR "... forcing use of dummy APIC emulation."
+               if (!disable_apic) {
+                       pr_err("BIOS bug, local APIC #%d not detected!...\n",
+                               boot_cpu_physical_apicid);
+                       pr_err("... forcing use of dummy APIC emulation."
                                "(tell your hw vendor)\n");
+               }
                smpboot_clear_io_apic();
-               disable_ioapic_setup();
+               arch_disable_smp_support();
                return -1;
        }
 
@@ -1125,6 +1071,8 @@ static void __init smp_cpu_index_default(void)
  */
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
+       unsigned int i;
+
        preempt_disable();
        smp_cpu_index_default();
        current_cpu_data = boot_cpu_data;
@@ -1138,12 +1086,15 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        boot_cpu_logical_apicid = logical_smp_processor_id();
 #endif
        current_thread_info()->cpu = 0;  /* needed? */
+       for_each_possible_cpu(i) {
+               zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+               zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
+       }
        set_cpu_sibling_map(0);
 
-#ifdef CONFIG_X86_64
        enable_IR_x2apic();
        default_setup_apic_routing();
-#endif
 
        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
@@ -1166,13 +1117,12 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
         */
        setup_local_APIC();
 
-#ifdef CONFIG_X86_64
        /*
         * Enable IO APIC before setting up error vector
         */
        if (!skip_ioapic_setup && nr_ioapics)
                enable_IO_APIC();
-#endif
+
        end_local_APIC_setup();
 
        map_cpu_to_logical_apicid();
@@ -1187,20 +1137,33 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
        printk(KERN_INFO "CPU%d: ", 0);
        print_cpu_info(&cpu_data(0));
-       setup_boot_clock();
+       x86_init.timers.setup_percpu_clockev();
 
        if (is_uv_system())
                uv_system_init();
+
+       set_mtrr_aps_delayed_init();
 out:
        preempt_enable();
 }
+
+void arch_enable_nonboot_cpus_begin(void)
+{
+       set_mtrr_aps_delayed_init();
+}
+
+void arch_enable_nonboot_cpus_end(void)
+{
+       mtrr_aps_init();
+}
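
The two new arch_enable_nonboot_cpus_{begin,end}() hooks let x86 batch MTRR programming when many CPUs come back at once on resume: begin() defers per-AP MTRR init, end() replays it once after all APs are up. A simplified paraphrase of how the generic resume path invokes them (modeled on kernel/cpu.c, not verbatim):

    void enable_nonboot_cpus_sketch(void)
    {
            int cpu;

            arch_enable_nonboot_cpus_begin();   /* x86: defer MTRR init */
            for_each_cpu(cpu, frozen_cpus)      /* CPUs downed for suspend */
                    cpu_up(cpu);
            arch_enable_nonboot_cpus_end();     /* x86: mtrr_aps_init() */
    }
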
+
 /*
  * Early setup to make printk work.
  */
 void __init native_smp_prepare_boot_cpu(void)
 {
        int me = smp_processor_id();
-       switch_to_new_gdt();
+       switch_to_new_gdt(me);
        /* already set me in cpu_online_mask in boot_cpu_init() */
        cpumask_set_cpu(me, cpu_callout_mask);
        per_cpu(cpu_state, me) = CPU_ONLINE;
@@ -1211,11 +1174,11 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
        pr_debug("Boot done.\n");
 
        impress_friends();
-       smp_checks();
 #ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
 #endif
        check_nmi_watchdog();
+       mtrr_aps_init();
 }
 
 static int __initdata setup_possible_cpus = -1;
@@ -1228,11 +1191,11 @@ early_param("possible_cpus", _setup_possible_cpus);
 
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_mask should be static, it cannot change as cpu's
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
+ * cpu_present_mask on the other hand can change dynamically.
  * In case when cpu_hotplug is not compiled, then we resort to current
  * behaviour, which is cpu_possible == cpu_present.
  * - Ashok Raj
@@ -1259,11 +1222,12 @@ __init void prefill_possible_map(void)
 
        total_cpus = max_t(int, possible, num_processors + disabled_cpus);
 
-       if (possible > CONFIG_NR_CPUS) {
+       /* nr_cpu_ids could be reduced via nr_cpus= */
+       if (possible > nr_cpu_ids) {
                printk(KERN_WARNING
                        "%d Processors exceeds NR_CPUS limit of %d\n",
-                       possible, CONFIG_NR_CPUS);
-               possible = CONFIG_NR_CPUS;
+                       possible, nr_cpu_ids);
+               possible = nr_cpu_ids;
        }
 
        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
@@ -1313,16 +1277,7 @@ static void __ref remove_cpu_from_maps(int cpu)
 void cpu_disable_common(void)
 {
        int cpu = smp_processor_id();
-       /*
-        * HACK:
-        * Allow any queued timer interrupts to get serviced
-        * This is only a temporary solution until we cleanup
-        * fixup_irqs as we do for IA64.
-        */
-       local_irq_enable();
-       mdelay(1);
 
-       local_irq_disable();
        remove_siblinginfo(cpu);
 
        /* It's now safe to remove this processor from the online map */
@@ -1363,14 +1318,16 @@ void native_cpu_die(unsigned int cpu)
        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-                       printk(KERN_INFO "CPU %d is now offline\n", cpu);
+                       if (system_state == SYSTEM_RUNNING)
+                               pr_info("CPU %u is now offline\n", cpu);
+
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
-       printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+       pr_err("CPU %u didn't die...\n", cpu);
 }
 
 void play_dead_common(void)
@@ -1393,6 +1350,7 @@ void play_dead_common(void)
 void native_play_dead(void)
 {
        play_dead_common();
+       tboot_shutdown(TB_SHUTDOWN_WFS);
        wbinvd_halt();
 }