perf_events, x86: Implement intel core solo/duo support
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
index 6f8e325..3c84aa0 100644
--- a/arch/x86/kernel/uv_time.c
+++ b/arch/x86/kernel/uv_time.c
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/bios.h>
 #include <asm/uv/uv.h>
+#include <asm/apic.h>
+#include <asm/cpu.h>
 
 #define RTC_NAME               "sgi_rtc"
 
-static cycle_t uv_read_rtc(void);
+static cycle_t uv_read_rtc(struct clocksource *cs);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
 static void uv_rtc_timer_setup(enum clock_event_mode,
                                struct clock_event_device *);
@@ -72,7 +74,7 @@ struct uv_rtc_timer_head {
  */
 static struct uv_rtc_timer_head                **blade_info __read_mostly;
 
-static int                             uv_rtc_enable;
+static int                             uv_rtc_evt_enable;
 
 /*
  * Hardware interface routines
@@ -84,11 +86,11 @@ static void uv_rtc_send_IPI(int cpu)
        unsigned long apicid, val;
        int pnode;
 
-       apicid = per_cpu(x86_cpu_to_apicid, cpu);
+       apicid = cpu_physical_id(cpu);
        pnode = uv_apicid_to_pnode(apicid);
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
              (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
-             (GENERIC_INTERRUPT_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
+             (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
 
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
@@ -113,7 +115,7 @@ static int uv_setup_intr(int cpu, u64 expires)
        uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
                UVH_EVENT_OCCURRED0_RTC1_MASK);
 
-       val = (GENERIC_INTERRUPT_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
+       val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
                ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
 
        /* Set configuration */
@@ -121,7 +123,10 @@ static int uv_setup_intr(int cpu, u64 expires)
        /* Initialize comparator value */
        uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
 
-       return (expires < uv_read_rtc() && !uv_intr_pending(pnode));
+       if (uv_read_rtc(NULL) <= expires)
+               return 0;
+
+       return !uv_intr_pending(pnode);
 }
 
 /*
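Restructured, the tail of uv_setup_intr() reads: success (0) while the RTC has not yet passed the requested expiry, otherwise success only if the interrupt already latched. Below is a stand-alone user-space sketch of that return convention only, with invented names (fake_rtc, fake_cmpb, irq_pending) standing in for the UV MMR accesses done under the blade lock.

/* Sketch of the uv_setup_intr() return convention: 0 = armed / will fire,
 * non-zero = expiry already missed and no interrupt latched.  All names
 * here are invented stand-ins for the UV MMR reads. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_rtc;       /* stands in for uv_read_rtc() */
static uint64_t fake_cmpb;      /* stands in for UVH_INT_CMPB */
static int irq_pending;         /* stands in for uv_intr_pending() */

static int setup_intr(uint64_t expires)
{
        fake_cmpb = expires;            /* program the comparator */

        if (fake_rtc <= expires)        /* deadline still ahead: armed */
                return 0;

        return !irq_pending;            /* already late: fail unless latched */
}

int main(void)
{
        fake_rtc = 100;
        printf("future expiry -> %d\n", setup_intr(200));      /* 0 */
        printf("missed expiry -> %d\n", setup_intr(50));        /* 1 */
        return 0;
}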
@@ -221,6 +226,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
 
        next_cpu = head->next_cpu;
        *t = expires;
+
        /* Will this one be next to go off? */
        if (next_cpu < 0 || bcpu == next_cpu ||
                        expires < head->cpu[next_cpu].expires) {
@@ -229,7 +235,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
                        *t = ULLONG_MAX;
                        uv_rtc_find_next_timer(head, pnode);
                        spin_unlock_irqrestore(&head->lock, flags);
-                       return 1;
+                       return -ETIME;
                }
        }
 
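Returning -ETIME instead of a bare 1 gives the caller a conventional error for a missed expiry, which the clockevents core can react to when ->set_next_event() fails. The model below is illustrative only (a caller retrying once with a larger delta), not the kernel's actual retry logic.

/* Hedged model of a caller reacting to -ETIME from a set_next_event()-style
 * hook; the retry policy here is invented for illustration. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_DELTA 16    /* toy device: cannot arm closer than this */

static int set_next_event(uint64_t delta)
{
        return (delta >= MIN_DELTA) ? 0 : -ETIME;
}

static int program_event(uint64_t delta)
{
        int rc = set_next_event(delta);

        if (rc == -ETIME)                       /* missed: retry with headroom */
                rc = set_next_event(MIN_DELTA);
        return rc;
}

int main(void)
{
        printf("delta 100 -> %d\n", program_event(100));        /* 0 */
        printf("delta 2   -> %d\n", program_event(2));          /* 0 after retry */
        return 0;
}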
@@ -242,7 +248,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
  *
  * Returns 1 if this timer was pending.
  */
-static int uv_rtc_unset_timer(int cpu)
+static int uv_rtc_unset_timer(int cpu, int force)
 {
        int pnode = uv_cpu_to_pnode(cpu);
        int bid = uv_cpu_to_blade_id(cpu);
@@ -254,14 +260,15 @@ static int uv_rtc_unset_timer(int cpu)
 
        spin_lock_irqsave(&head->lock, flags);
 
-       if (head->next_cpu == bcpu && uv_read_rtc() >= *t)
+       if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
                rc = 1;
 
-       *t = ULLONG_MAX;
-
-       /* Was the hardware setup for this timer? */
-       if (head->next_cpu == bcpu)
-               uv_rtc_find_next_timer(head, pnode);
+       if (rc) {
+               *t = ULLONG_MAX;
+               /* Was the hardware setup for this timer? */
+               if (head->next_cpu == bcpu)
+                       uv_rtc_find_next_timer(head, pnode);
+       }
 
        spin_unlock_irqrestore(&head->lock, flags);
 
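The new force argument separates the two callers of uv_rtc_unset_timer(): the shutdown path tears the timer down unconditionally, while the interrupt path passes 0 and only treats the timer as pending when it genuinely expired. A small stand-alone sketch of that decision follows, with invented state in place of the per-blade uv_rtc_timer_head and its lock.

/* Sketch of the uv_rtc_unset_timer() decision: returns 1 ("was pending")
 * and clears the slot only when the timer actually expired or the caller
 * forces it.  The per-blade head/lock bookkeeping is omitted. */
#include <stdint.h>
#include <stdio.h>

#define SLOT_EMPTY UINT64_MAX

static uint64_t now;            /* stands in for uv_read_rtc(NULL) */
static uint64_t expiry;         /* this cpu's slot in the blade head */
static int this_cpu_is_next;    /* head->next_cpu == bcpu */

static int unset_timer(int force)
{
        int pending = (this_cpu_is_next && now >= expiry) || force;

        if (pending)
                expiry = SLOT_EMPTY;    /* real code also rescans for the next timer */

        return pending;
}

int main(void)
{
        now = 100;
        expiry = 200;
        this_cpu_is_next = 1;

        printf("irq path, not expired -> %d\n", unset_timer(0));       /* 0: keep armed */
        printf("shutdown, forced      -> %d\n", unset_timer(1));       /* 1: torn down */
        return 0;
}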
@@ -276,7 +283,7 @@ static int uv_rtc_unset_timer(int cpu)
 /*
  * Read the RTC.
  */
-static cycle_t uv_read_rtc(void)
+static cycle_t uv_read_rtc(struct clocksource *cs)
 {
        return (cycle_t)uv_read_local_mmr(UVH_RTC);
 }
@@ -289,7 +296,7 @@ static int uv_rtc_next_event(unsigned long delta,
 {
        int ced_cpu = cpumask_first(ced->cpumask);
 
-       return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc());
+       return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
 }
 
 /*
@@ -308,32 +315,32 @@ static void uv_rtc_timer_setup(enum clock_event_mode mode,
                break;
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
-               uv_rtc_unset_timer(ced_cpu);
+               uv_rtc_unset_timer(ced_cpu, 1);
                break;
        }
 }
 
 static void uv_rtc_interrupt(void)
 {
-       struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
        int cpu = smp_processor_id();
+       struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);
 
        if (!ced || !ced->event_handler)
                return;
 
-       if (uv_rtc_unset_timer(cpu) != 1)
+       if (uv_rtc_unset_timer(cpu, 0) != 1)
                return;
 
        ced->event_handler(ced);
 }
 
-static int __init uv_enable_rtc(char *str)
+static int __init uv_enable_evt_rtc(char *str)
 {
-       uv_rtc_enable = 1;
+       uv_rtc_evt_enable = 1;
 
        return 1;
 }
-__setup("uvrtc", uv_enable_rtc);
+__setup("uvrtcevt", uv_enable_evt_rtc);
 
 static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
 {
@@ -348,27 +355,32 @@ static __init int uv_rtc_setup_clock(void)
 {
        int rc;
 
-       if (!uv_rtc_enable || !is_uv_system() || generic_interrupt_extension)
+       if (!is_uv_system())
                return -ENODEV;
 
-       generic_interrupt_extension = uv_rtc_interrupt;
-
        clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
                                clocksource_uv.shift);
 
+       /* If single blade, prefer tsc */
+       if (uv_num_possible_blades() == 1)
+               clocksource_uv.rating = 250;
+
        rc = clocksource_register(&clocksource_uv);
-       if (rc) {
-               generic_interrupt_extension = NULL;
+       if (rc)
+               printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
+       else
+               printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n",
+                       sn_rtc_cycles_per_second/(unsigned long)1E6);
+
+       if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback)
                return rc;
-       }
 
        /* Setup and register clockevents */
        rc = uv_rtc_allocate_timers();
-       if (rc) {
-               clocksource_unregister(&clocksource_uv);
-               generic_interrupt_extension = NULL;
-               return rc;
-       }
+       if (rc)
+               goto error;
+
+       x86_platform_ipi_callback = uv_rtc_interrupt;
 
        clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
                                NSEC_PER_SEC, clock_event_device_uv.shift);
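Lowering the rating to 250 on a single-blade system keeps the UV RTC registered but lets a higher-rated clocksource win selection; the x86 TSC clocksource typically registers at rating 300. The toy model below shows that highest-rating-wins choice only; the real clocksource core additionally honours user overrides and watchdog downgrades.

/* Toy model of clocksource selection by rating: why a UV RTC rated 250
 * loses to a TSC rated 300.  Not the kernel's selection code. */
#include <stdio.h>

struct cs_entry {
        const char *name;
        int rating;
};

static const struct cs_entry *pick_best(const struct cs_entry *cs, int n)
{
        const struct cs_entry *best = &cs[0];

        for (int i = 1; i < n; i++)
                if (cs[i].rating > best->rating)
                        best = &cs[i];
        return best;
}

int main(void)
{
        struct cs_entry regs[] = {
                { "tsc",    300 },      /* typical x86 TSC rating */
                { "uv_rtc", 250 },      /* single-blade UV RTC after this patch */
        };

        printf("selected: %s\n", pick_best(regs, 2)->name);    /* "tsc" */
        return 0;
}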
@@ -381,11 +393,19 @@ static __init int uv_rtc_setup_clock(void)
 
        rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
        if (rc) {
-               clocksource_unregister(&clocksource_uv);
-               generic_interrupt_extension = NULL;
+               x86_platform_ipi_callback = NULL;
                uv_rtc_deallocate_timers();
+               goto error;
        }
 
+       printk(KERN_INFO "UV RTC clockevents registered\n");
+
+       return 0;
+
+error:
+       clocksource_unregister(&clocksource_uv);
+       printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc);
+
        return rc;
 }
 arch_initcall(uv_rtc_setup_clock);
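With clocksource registration decoupled from the clockevent setup, the clockevent half now piggybacks on the platform IPI vector: uv_rtc_setup_clock() claims x86_platform_ipi_callback only when it is free and releases it if per-cpu registration fails. The fragment below models that single-owner hook pattern in isolation; it is a sketch, not the kernel's IPI plumbing.

/* Stand-alone sketch of the single-owner callback hook used above:
 * claim the hook only when free, invoke it from a simulated vector
 * handler, release it on teardown.  Not the kernel's actual IPI code. */
#include <errno.h>
#include <stdio.h>

static void (*platform_ipi_callback)(void);     /* models x86_platform_ipi_callback */

static void rtc_interrupt(void)
{
        printf("UV RTC event fired\n");
}

static int claim_platform_ipi(void (*handler)(void))
{
        if (platform_ipi_callback)              /* someone else owns the vector */
                return -EBUSY;
        platform_ipi_callback = handler;
        return 0;
}

static void platform_ipi_vector(void)           /* models the IPI vector handler */
{
        if (platform_ipi_callback)
                platform_ipi_callback();
}

int main(void)
{
        if (claim_platform_ipi(rtc_interrupt) == 0)
                platform_ipi_vector();          /* prints "UV RTC event fired" */

        platform_ipi_callback = NULL;           /* teardown on error/unregister */
        return 0;
}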