Merge branch 'iommu-fixes-2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 91d4d49..cb19d65 100644
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/time.h>
+#include <linux/mca.h>
 
 #include <asm/i8253.h>
 #include <asm/hpet.h>
@@ -33,30 +34,41 @@ unsigned long profile_pc(struct pt_regs *regs)
        /* Assume the lock function has either no stack frame or a copy
           of EFLAGS from PUSHF.
           EFLAGS always has bits 22 and up cleared, unlike kernel addresses. */
-       if (!user_mode(regs) && in_lock_functions(pc)) {
+       if (!user_mode_vm(regs) && in_lock_functions(pc)) {
+#ifdef CONFIG_FRAME_POINTER
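+               /* With frame pointers, the return address sits one word
+                  above the saved frame pointer that regs->bp points to. */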
+               return *(unsigned long *)(regs->bp + sizeof(long));
+#else
                unsigned long *sp = (unsigned long *)regs->sp;
                if (sp[0] >> 22)
                        return sp[0];
                if (sp[1] >> 22)
                        return sp[1];
+#endif
        }
        return pc;
 }
 EXPORT_SYMBOL(profile_pc);
 
-static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
+irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
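        /* Count this tick in the per-CPU PDA's irq0_irqs field. */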
        add_pda(irq0_irqs, 1);
 
        global_clock_event->event_handler(global_clock_event);
 
+#ifdef CONFIG_MCA
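+       /* PS/2 MCA machines use level-triggered interrupts; IRQ 0 is
+          acknowledged by setting the high bit of PPI port B (0x61). */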
+       if (MCA_bus) {
+               u8 irq_v = inb_p(0x61);       /* read the current state */
+               outb_p(irq_v|0x80, 0x61);     /* reset the IRQ */
+       }
+#endif
+
        return IRQ_HANDLED;
 }
 
 /* calibrate_cpu is used on systems with fixed-rate TSCs to determine
  * the processor frequency. */
 #define TICK_COUNT 100000000
-unsigned long __init native_calculate_cpu_khz(void)
+unsigned long __init calibrate_cpu(void)
 {
        int tsc_start, tsc_now;
        int i, no_ctr_free;
@@ -77,13 +89,13 @@ unsigned long __init native_calculate_cpu_khz(void)
                reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
        }
        local_irq_save(flags);
-       /* start meauring cycles, incrementing from 0 */
+       /* start measuring cycles, incrementing from 0 */
        wrmsrl(MSR_K7_PERFCTR0 + i, 0);
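        /* Event select: bit 22 enables the counter, bits 17:16 count in
           both OS and user mode, event 0x76 is CPU-clocks-not-halted. */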
        wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
        rdtscl(tsc_start);
        do {
                rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-               tsc_now = get_cycles_sync();
+               tsc_now = get_cycles();
        } while ((tsc_now - tsc_start) < TICK_COUNT);
 
        local_irq_restore(flags);
@@ -100,7 +112,7 @@ unsigned long __init native_calculate_cpu_khz(void)
 }
 
 static struct irqaction irq0 = {
-       .handler        = timer_event_interrupt,
+       .handler        = timer_interrupt,
        .flags          = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
        .mask           = CPU_MASK_NONE,
        .name           = "timer"
@@ -111,29 +123,13 @@ void __init hpet_time_init(void)
        if (!hpet_enable())
                setup_pit_timer();
 
+       irq0.mask = cpumask_of_cpu(0);  /* keep the timer interrupt on the boot CPU */
        setup_irq(0, &irq0);
 }
 
 void __init time_init(void)
 {
-       tsc_calibrate();
-
-       cpu_khz = tsc_khz;
-       if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
-               boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-               boot_cpu_data.x86 == 16)
-               cpu_khz = calculate_cpu_khz();
-
-       if (unsynchronized_tsc())
-               mark_tsc_unstable("TSCs unsynchronized");
-
-       if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
-               vgetcpu_mode = VGETCPU_RDTSCP;
-       else
-               vgetcpu_mode = VGETCPU_LSL;
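+       /* The TSC calibration and clocksource setup formerly open-coded
+          here now live in tsc_init(). */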
+       tsc_init();
 
-       printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
-               cpu_khz / 1000, cpu_khz % 1000);
-       init_tsc_clocksource();
        late_time_init = choose_time_init();
 }