x86: cleanup early per cpu variables/accesses v4
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index f6eb01d..4fd21f7 100644
 #include <asm/mpspec.h>
 #include <asm/hpet.h>
 #include <asm/pgalloc.h>
-#include <asm/mach_apic.h>
 #include <asm/nmi.h>
 #include <asm/idle.h>
 #include <asm/proto.h>
 #include <asm/timex.h>
 #include <asm/apic.h>
 
+#include <mach_ipi.h>
+#include <mach_apic.h>
+
 int disable_apic_timer __cpuinitdata;
 static int apic_calibrate_pmtmr __initdata;
 int disable_apic;
@@ -83,6 +85,9 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
 static unsigned long apic_phys;
 
+unsigned long mp_lapic_addr;
+
+unsigned int __cpuinitdata maxcpus = NR_CPUS;
 /*
  * Get the LAPIC version
  */
@@ -352,7 +357,8 @@ static void __init calibrate_APIC_clock(void)
                result / 1000 / 1000, result / 1000 % 1000);
 
        /* Calculate the scaled math multiplication factor */
-       lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
+       lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
+                                      lapic_clockevent.shift);
        lapic_clockevent.max_delta_ns =
                clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
        lapic_clockevent.min_delta_ns =
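
The point of this hunk: the multiplier must be derived from the same
shift the clockevents core later applies, so the hard-coded 32 is
replaced by lapic_clockevent.shift (presumably 32 at this point, so the
computed value is unchanged and only the latent mismatch goes away).
A minimal userspace sketch of the scaled math, assuming div_sc()'s
usual (ticks << shift) / nsec behaviour:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of clockevents scaled math: choose mult so that
     * clocks = (ns * mult) >> shift holds at the calibrated rate.
     */
    static uint32_t div_sc_sketch(uint64_t ticks_per_sec,
                                  uint64_t nsec_per_sec, unsigned shift)
    {
        return (uint32_t)((ticks_per_sec << shift) / nsec_per_sec);
    }

    int main(void)
    {
        /* A 100 MHz LAPIC timer, shift 32: one clock per 10 ns. */
        uint32_t mult = div_sc_sketch(100000000ULL, 1000000000ULL, 32);
        uint64_t ns = 125000;               /* 125 us */

        /* Prints ~12500 clocks (12499 after truncation). */
        printf("mult=%u clocks=%llu\n", mult,
               (unsigned long long)((ns * mult) >> 32));
        return 0;
    }
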
@@ -421,7 +427,7 @@ void __init setup_boot_APIC_clock(void)
  * set the DUMMY flag again and force the broadcast mode in the
  * clockevents layer.
  */
-void __cpuinit check_boot_apic_timer_broadcast(void)
+static void __cpuinit check_boot_apic_timer_broadcast(void)
 {
        if (!disable_apic_timer ||
            (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
@@ -431,7 +437,8 @@ void __cpuinit check_boot_apic_timer_broadcast(void)
        lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
 
        local_irq_enable();
-       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
+                          &boot_cpu_physical_apicid);
        local_irq_disable();
 }
 
@@ -524,7 +531,7 @@ int setup_profiling_timer(unsigned int multiplier)
  */
 void clear_local_APIC(void)
 {
-       int maxlvt = lapic_get_maxlvt();
+       int maxlvt;
        u32 v;
 
        /* APIC hasn't been mapped yet */
@@ -640,10 +647,10 @@ int __init verify_local_APIC(void)
        /*
         * The ID register is read/write in a real APIC.
         */
-       reg0 = apic_read(APIC_ID);
+       reg0 = read_apic_id();
        apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
        apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
-       reg1 = apic_read(APIC_ID);
+       reg1 = read_apic_id();
        apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
        apic_write(APIC_ID, reg0);
        if (reg1 != (reg0 ^ APIC_ID_MASK))
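
Note on read_apic_id(): verify_local_APIC() wants the raw, untranslated
register value here -- it XORs the full APIC_ID_MASK and compares raw
reads -- which is why GET_APIC_ID() is not applied. The wrapper buys an
indirection point that genapic/subarch code can override; on a plain
xAPIC it amounts to no more than this (a sketch, assuming the
mach_apic.h definition is a thin wrapper):

    /* Sketch only: the raw MMIO read of the ID register. */
    static inline unsigned int read_apic_id(void)
    {
        return apic_read(APIC_ID);
    }
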
@@ -728,6 +735,7 @@ void __cpuinit setup_local_APIC(void)
        unsigned int value;
        int i, j;
 
+       preempt_disable();
        value = apic_read(APIC_LVR);
 
        BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
@@ -821,9 +829,10 @@ void __cpuinit setup_local_APIC(void)
        else
                value = APIC_DM_NMI | APIC_LVT_MASKED;
        apic_write(APIC_LVT1, value);
+       preempt_enable();
 }
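
The preempt_disable()/preempt_enable() pair added above pins the task
to one CPU for the whole register sequence: setup_local_APIC() programs
the APIC of whichever CPU it runs on, and migrating halfway through
would split the writes across two different APICs. The same guarantee
is what the stock get_cpu()/put_cpu() helpers package up; a sketch with
a hypothetical helper (not part of the patch):

    #include <linux/smp.h>

    static void touch_this_cpus_apic(void)
    {
        int cpu = get_cpu();    /* preempt_disable() + smp_processor_id() */

        /* ... program this CPU's local APIC; no migration possible ... */

        put_cpu();              /* preempt_enable() */
        (void)cpu;
    }
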
 
-void __cpuinit lapic_setup_esr(void)
+static void __cpuinit lapic_setup_esr(void)
 {
        unsigned maxlvt = lapic_get_maxlvt();
 
@@ -857,10 +866,34 @@ static int __init detect_init_APIC(void)
        }
 
        mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-       boot_cpu_id = 0;
+       boot_cpu_physical_apicid = 0;
        return 0;
 }
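
For reference, the fallback values set just above are the architectural
defaults: APIC_DEFAULT_PHYS_BASE is the LAPIC base the Intel SDM
specifies when nothing has remapped it, and the BSP's APIC ID is
assumed to be 0.

    /* What APIC_DEFAULT_PHYS_BASE expands to: */
    #define APIC_DEFAULT_PHYS_BASE  0xfee00000
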
 
+void __init early_init_lapic_mapping(void)
+{
+       unsigned long apic_phys;
+
+       /*
+        * If no local APIC can be found then bail out:
+        * it means there is neither an mptable nor an MADT.
+        */
+       if (!smp_found_config)
+               return;
+
+       apic_phys = mp_lapic_addr;
+
+       set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
+       apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
+                                APIC_BASE, apic_phys);
+
+       /*
+        * Fetch the APIC ID of the BSP in case we have a
+        * default configuration (or the MP table is broken).
+        */
+       boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+}
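
early_init_lapic_mapping() exists so the LAPIC registers are usable
before ioremap() and the full VM come up: set_fixmap_nocache() installs
an uncached page at the compile-time-fixed virtual address APIC_BASE.
From then on the accessors and the ID extraction are simple MMIO;
roughly (a sketch of the era's helpers, not part of this patch):

    /* With FIX_APIC_BASE mapped, register access is a volatile
     * load/store at the fixed virtual address APIC_BASE ...
     */
    static inline unsigned int apic_read_sketch(unsigned long reg)
    {
        return *(volatile unsigned int *)(APIC_BASE + reg);
    }

    /* ... and GET_APIC_ID() pulls the 8-bit xAPIC physical ID out
     * of bits 31:24 of the raw ID register value.
     */
    #define GET_APIC_ID_SKETCH(x)   (((x) >> 24) & 0xffu)
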
+
 /**
  * init_apic_mappings - initialize APIC mappings
  */
@@ -885,7 +918,7 @@ void __init init_apic_mappings(void)
         * Fetch the APIC ID of the BSP in case we have a
         * default configuration (or the MP table is broken).
         */
-       boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
+       boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
 }
 
 /*
@@ -906,8 +939,8 @@ int __init APIC_init_uniprocessor(void)
 
        verify_local_APIC();
 
-       phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
-       apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
+       phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
+       apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
 
        setup_local_APIC();
 
@@ -1024,6 +1057,52 @@ void disconnect_bsp_APIC(int virt_wire_setup)
        apic_write(APIC_LVT1, value);
 }
 
+void __cpuinit generic_processor_info(int apicid, int version)
+{
+       int cpu;
+       cpumask_t tmp_map;
+
+       if (num_processors >= NR_CPUS) {
+               printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
+                      " Processor ignored.\n", NR_CPUS);
+               return;
+       }
+
+       if (num_processors >= maxcpus) {
+               printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
+                      " Processor ignored.\n", maxcpus);
+               return;
+       }
+
+       num_processors++;
+       cpus_complement(tmp_map, cpu_present_map);
+       cpu = first_cpu(tmp_map);
+
+       physid_set(apicid, phys_cpu_present_map);
+       if (apicid == boot_cpu_physical_apicid) {
+               /*
+                * x86_bios_cpu_apicid is required to have processors listed
+                * in same order as logical cpu numbers. Hence the first
+                * entry is BSP, and so on.
+                */
+               cpu = 0;
+       }
+       /* are we being called early in kernel startup? */
+       if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
+               u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
+               u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
+
+               cpu_to_apicid[cpu] = apicid;
+               bios_cpu_apicid[cpu] = apicid;
+       } else {
+               per_cpu(x86_cpu_to_apicid, cpu) = apicid;
+               per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
+       }
+
+       cpu_set(cpu, cpu_possible_map);
+       cpu_set(cpu, cpu_present_map);
+}
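
This function is the crux of the series: generic_processor_info() runs
both before and after setup_per_cpu_areas(), and the
early_per_cpu_ptr() test above picks whichever backing store is live.
The machinery looks roughly like this (paraphrased from the series'
percpu.h changes; treat the bodies as a sketch):

    /* An NR_CPUS-sized __initdata array backs the variable until
     * the real per-cpu areas exist; setup_per_cpu_areas() later
     * copies the map over and NULLs the early pointer.
     */
    #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)           \
        DEFINE_PER_CPU(_type, _name) = _initvalue;                   \
        __typeof__(_type) _name##_early_map[NR_CPUS] __initdata =    \
                    { [0 ... NR_CPUS-1] = _initvalue };              \
        __typeof__(_type) *_name##_early_ptr = _name##_early_map

    #define early_per_cpu_ptr(_name) (_name##_early_ptr)

    /* A read that works in either phase: */
    #define early_per_cpu(_name, _cpu)                               \
        (early_per_cpu_ptr(_name) ?                                  \
         early_per_cpu_ptr(_name)[_cpu] :                            \
         per_cpu(_name, _cpu))
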
+
 /*
  * Power management
  */
@@ -1060,7 +1139,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
 
        maxlvt = lapic_get_maxlvt();
 
-       apic_pm_state.apic_id = apic_read(APIC_ID);
+       apic_pm_state.apic_id = read_apic_id();
        apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
        apic_pm_state.apic_ldr = apic_read(APIC_LDR);
        apic_pm_state.apic_dfr = apic_read(APIC_DFR);
@@ -1184,10 +1263,10 @@ __cpuinit int apic_is_clustered_box(void)
         * will be [4, 0x23] or [8, 0x27] could be thought to
         * vsmp box still need checking...
         */
-       if (!is_vsmp_box() && (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
+       if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
                return 0;
 
-       bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
+       bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
        bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
        for (i = 0; i < NR_CPUS; i++) {
@@ -1224,6 +1303,12 @@ __cpuinit int apic_is_clustered_box(void)
                        ++zeros;
        }
 
+       /*
+        * ScaleMP vSMPowered boxes have one cluster per board and
+        * TSCs are not guaranteed to be synced between boards.
+        */
+       if (is_vsmp_box() && clusters > 1)
+               return 1;
+
        /*
         * If clusters > 2, then should be multi-chassis.
         * May have to revisit this when multi-core + hyperthreaded CPUs come