x86: fold apic_ops into genapic
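
The initializer of apic_x2apic_uv_x below shows struct genapic now carrying the low-level register accessors (.read, .write, .icr_read, .icr_write, .wait_icr_idle, .safe_wait_icr_idle) that previously lived in the separate apic_ops table; UV wires them to the native x2APIC MSR helpers. What follows is a minimal standalone sketch of that fold, not code from this commit: the fake_* accessors and the trivial caller are hypothetical stand-ins for the real MSR helpers.

/*
 * Sketch only: illustrates folding a separate ops table (the old apic_ops)
 * into the per-platform descriptor (struct genapic), as the UV driver does
 * in the diff below.  fake_* are hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

struct genapic {
	const char *name;
	/* register accessors that formerly lived in struct apic_ops */
	uint32_t (*read)(uint32_t reg);
	void     (*write)(uint32_t reg, uint32_t val);
};

static uint32_t fake_msr_read(uint32_t reg)
{
	return reg;			/* pretend the register echoes its offset */
}

static void fake_msr_write(uint32_t reg, uint32_t val)
{
	(void)reg; (void)val;		/* no-op for the sketch */
}

static struct genapic apic_example = {
	.name	= "example",
	.read	= fake_msr_read,
	.write	= fake_msr_write,
};

static struct genapic *genapic = &apic_example;

int main(void)
{
	/* callers now reach the accessors through the genapic pointer */
	printf("%s: read(0x20) = 0x%x\n", genapic->name, genapic->read(0x20));
	return 0;
}
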
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 53bd257..9ae4a92 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/threads.h>
+#include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <asm/current.h>
 #include <asm/smp.h>
-#include <asm/ipi.h>
 #include <asm/genapic.h>
+#include <asm/ipi.h>
 #include <asm/pgtable.h>
+#include <asm/uv/uv.h>
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/bios.h>
@@ -75,15 +80,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
 /* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
 
-static const cpumask_t *uv_target_cpus(void)
+static const struct cpumask *uv_target_cpus(void)
 {
-       return &cpumask_of_cpu(0);
+       return cpumask_of(0);
 }
 
-static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask)
+static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-       cpus_clear(*retmask);
-       cpu_set(cpu, *retmask);
+       cpumask_clear(retmask);
+       cpumask_set_cpu(cpu, retmask);
 }
 
 int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
@@ -109,50 +114,52 @@ int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
 
 static void uv_send_IPI_one(int cpu, int vector)
 {
-       unsigned long val, apicid, lapicid;
+       unsigned long val, apicid;
        int pnode;
 
        apicid = per_cpu(x86_cpu_to_apicid, cpu);
-       lapicid = apicid & 0x3f;                /* ZZZ macro needed */
        pnode = uv_apicid_to_pnode(apicid);
-       val =
-           (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid <<
-                                             UVH_IPI_INT_APIC_ID_SHFT) |
-           (vector << UVH_IPI_INT_VECTOR_SHFT);
+
+       val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+             (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+             (vector << UVH_IPI_INT_VECTOR_SHFT);
+
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
-static void uv_send_IPI_mask(const cpumask_t *mask, int vector)
+static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
 {
        unsigned int cpu;
 
-       for_each_cpu_mask_nr(cpu, *mask)
+       for_each_cpu(cpu, mask)
                uv_send_IPI_one(cpu, vector);
 }
 
-static void uv_send_IPI_mask_allbutself(const cpumask_t *mask, int vector)
+static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
-       unsigned int cpu;
        unsigned int this_cpu = smp_processor_id();
+       unsigned int cpu;
 
-       for_each_cpu_mask_nr(cpu, *mask)
+       for_each_cpu(cpu, mask) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
+       }
 }
 
 static void uv_send_IPI_allbutself(int vector)
 {
-       unsigned int cpu;
        unsigned int this_cpu = smp_processor_id();
+       unsigned int cpu;
 
-       for_each_online_cpu(cpu)
+       for_each_online_cpu(cpu) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
+       }
 }
 
 static void uv_send_IPI_all(int vector)
 {
-       uv_send_IPI_mask(&cpu_online_map, vector);
+       uv_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int uv_apic_id_registered(void)
@@ -164,23 +171,23 @@ static void uv_init_apic_ldr(void)
 {
 }
 
-static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-       int cpu;
-
        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
-       cpu = first_cpu(*cpumask);
+       int cpu = cpumask_first(cpumask);
+
        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
 }
 
-static unsigned int uv_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
-                                             const cpumask_t *andmask)
+static unsigned int
+uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                         const struct cpumask *andmask)
 {
        int cpu;
 
@@ -188,13 +195,17 @@ static unsigned int uv_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
-       while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids)
-               if (cpu_isset(cpu, *andmask))
-                       return per_cpu(x86_cpu_to_apicid, cpu);
+       for_each_cpu_and(cpu, cpumask, andmask) {
+               if (cpumask_test_cpu(cpu, cpu_online_mask))
+                       break;
+       }
+       if (cpu < nr_cpu_ids)
+               return per_cpu(x86_cpu_to_apicid, cpu);
+
        return BAD_APICID;
 }
 
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int x2apic_get_apic_id(unsigned long x)
 {
        unsigned int id;
 
@@ -216,10 +227,10 @@ static unsigned long set_apic_id(unsigned int id)
 static unsigned int uv_read_apic_id(void)
 {
 
-       return get_apic_id(apic_read(APIC_ID));
+       return x2apic_get_apic_id(apic_read(APIC_ID));
 }
 
-static unsigned int phys_pkg_id(int index_msb)
+static int uv_phys_pkg_id(int initial_apicid, int index_msb)
 {
        return uv_read_apic_id() >> index_msb;
 }
@@ -230,25 +241,64 @@ static void uv_send_IPI_self(int vector)
 }
 
 struct genapic apic_x2apic_uv_x = {
-       .name = "UV large system",
-       .acpi_madt_oem_check = uv_acpi_madt_oem_check,
-       .int_delivery_mode = dest_Fixed,
-       .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
-       .target_cpus = uv_target_cpus,
-       .vector_allocation_domain = uv_vector_allocation_domain,
-       .apic_id_registered = uv_apic_id_registered,
-       .init_apic_ldr = uv_init_apic_ldr,
-       .send_IPI_all = uv_send_IPI_all,
-       .send_IPI_allbutself = uv_send_IPI_allbutself,
-       .send_IPI_mask = uv_send_IPI_mask,
-       .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
-       .send_IPI_self = uv_send_IPI_self,
-       .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFFFFFFFu),
+
+       .name                           = "UV large system",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = uv_acpi_madt_oem_check,
+       .apic_id_registered             = uv_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       .irq_dest_mode                  = 1, /* logical */
+
+       .target_cpus                    = uv_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = uv_vector_allocation_domain,
+       .init_apic_ldr                  = uv_init_apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = uv_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = x2apic_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFFFFFFFu,
+
+       .cpu_mask_to_apicid             = uv_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = uv_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = uv_send_IPI_mask,
+       .send_IPI_mask_allbutself       = uv_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = uv_send_IPI_allbutself,
+       .send_IPI_all                   = uv_send_IPI_all,
+       .send_IPI_self                  = uv_send_IPI_self,
+
+       .wakeup_cpu                     = NULL,
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .store_NMI_vector               = NULL,
+       .inquire_remote_apic            = NULL,
+
+       .read                           = native_apic_msr_read,
+       .write                          = native_apic_msr_write,
+       .icr_read                       = native_x2apic_icr_read,
+       .icr_write                      = native_x2apic_icr_write,
+       .wait_icr_idle                  = native_x2apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
 };
 
 static __cpuinit void set_x2apic_extra_bits(int pnode)
@@ -381,6 +431,103 @@ static __init void uv_rtc_init(void)
 }
 
 /*
+ * percpu heartbeat timer
+ */
+static void uv_heartbeat(unsigned long ignored)
+{
+       struct timer_list *timer = &uv_hub_info->scir.timer;
+       unsigned char bits = uv_hub_info->scir.state;
+
+       /* flip heartbeat bit */
+       bits ^= SCIR_CPU_HEARTBEAT;
+
+       /* is this cpu idle? */
+       if (idle_cpu(raw_smp_processor_id()))
+               bits &= ~SCIR_CPU_ACTIVITY;
+       else
+               bits |= SCIR_CPU_ACTIVITY;
+
+       /* update system controller interface reg */
+       uv_set_scir_bits(bits);
+
+       /* enable next timer period */
+       mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
+}
+
+static void __cpuinit uv_heartbeat_enable(int cpu)
+{
+       if (!uv_cpu_hub_info(cpu)->scir.enabled) {
+               struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
+
+               uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
+               setup_timer(timer, uv_heartbeat, cpu);
+               timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
+               add_timer_on(timer, cpu);
+               uv_cpu_hub_info(cpu)->scir.enabled = 1;
+       }
+
+       /* check boot cpu */
+       if (!uv_cpu_hub_info(0)->scir.enabled)
+               uv_heartbeat_enable(0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void __cpuinit uv_heartbeat_disable(int cpu)
+{
+       if (uv_cpu_hub_info(cpu)->scir.enabled) {
+               uv_cpu_hub_info(cpu)->scir.enabled = 0;
+               del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
+       }
+       uv_set_cpu_scir_bits(cpu, 0xff);
+}
+
+/*
+ * cpu hotplug notifier
+ */
+static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
+                                      unsigned long action, void *hcpu)
+{
+       long cpu = (long)hcpu;
+
+       switch (action) {
+       case CPU_ONLINE:
+               uv_heartbeat_enable(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+               uv_heartbeat_disable(cpu);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static __init void uv_scir_register_cpu_notifier(void)
+{
+       hotcpu_notifier(uv_scir_cpu_notify, 0);
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+
+static __init void uv_scir_register_cpu_notifier(void)
+{
+}
+
+static __init int uv_init_heartbeat(void)
+{
+       int cpu;
+
+       if (is_uv_system())
+               for_each_online_cpu(cpu)
+                       uv_heartbeat_enable(cpu);
+       return 0;
+}
+
+late_initcall(uv_init_heartbeat);
+
+#endif /* !CONFIG_HOTPLUG_CPU */
+
+/*
  * Called on each cpu to initialize the per_cpu UV data area.
  *     ZZZ hotplug not supported yet
  */
@@ -453,7 +600,7 @@ void __init uv_system_init(void)
 
        uv_bios_init();
        uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
-                           &uv_coherency_id, &uv_region_size);
+                           &sn_coherency_id, &sn_region_size);
        uv_rtc_init();
 
        for_each_present_cpu(cpu) {
@@ -464,8 +611,7 @@ void __init uv_system_init(void)
                uv_blade_info[blade].nr_possible_cpus++;
 
                uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
-               uv_cpu_hub_info(cpu)->lowmem_remap_top =
-                                       lowmem_redir_base + lowmem_redir_size;
+               uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
                uv_cpu_hub_info(cpu)->m_val = m_val;
                uv_cpu_hub_info(cpu)->n_val = m_val;
                uv_cpu_hub_info(cpu)->numa_blade_id = blade;
@@ -475,7 +621,8 @@ void __init uv_system_init(void)
                uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
                uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
                uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
-               uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id;
+               uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
+               uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
                max_pnode = max(pnode, max_pnode);
@@ -492,4 +639,6 @@ void __init uv_system_init(void)
        map_mmioh_high(max_pnode);
 
        uv_cpu_init();
+       uv_scir_register_cpu_notifier();
+       proc_mkdir("sgi_uv", NULL);
 }