2 * Copyright (c) 1991,1992,1995 Linus Torvalds
3 * Copyright (c) 1994 Alan Modra
4 * Copyright (c) 1995 Markus Kuhn
5 * Copyright (c) 1996 Ingo Molnar
6 * Copyright (c) 1998 Andrea Arcangeli
7 * Copyright (c) 2002,2006 Vojtech Pavlik
8 * Copyright (c) 2003 Andi Kleen
12 #include <linux/clockchips.h>
13 #include <linux/interrupt.h>
14 #include <linux/time.h>
15 #include <linux/mca.h>
17 #include <asm/vsyscall.h>
18 #include <asm/x86_init.h>
19 #include <asm/i8259.h>
20 #include <asm/i8253.h>
21 #include <asm/timer.h>
26 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
/*
 * NOTE(review): legacy jiffies alias placed in a dedicated section
 * (__section_jiffies) — presumably so the vsyscall/fixmap page can map
 * it for userspace readers; confirm against the section definition.
 * The matching #endif for this conditional is not visible in this chunk.
 */
31 volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
/*
 * profile_pc - return the program counter to attribute a profiling hit to.
 * @regs: register state captured at the profiling interrupt.
 *
 * Normally just the interrupted instruction pointer.  If the interrupt
 * landed inside a lock function in kernel mode, the caller's address is
 * recovered instead (via the saved frame pointer when
 * CONFIG_FRAME_POINTER is set, otherwise by scanning the stack — the
 * non-frame-pointer branch is not visible in this chunk).
 */
34 unsigned long profile_pc(struct pt_regs *regs)
36 unsigned long pc = instruction_pointer(regs);
38 /* Assume the lock function has either no stack frame or a copy
40    Eflags always has bits 22 and up cleared unlike kernel addresses. */
41 if (!user_mode_vm(regs) && in_lock_functions(pc)) {
42 #ifdef CONFIG_FRAME_POINTER
/* Caller's return address sits just above the saved frame pointer. */
43 return *(unsigned long *)(regs->bp + sizeof(long));
/* NOTE(review): #else stack-scan fallback and closing braces are not
 * visible in this chunk. */
45 unsigned long *sp = (unsigned long *)regs->sp;
54 EXPORT_SYMBOL(profile_pc);
57 * Default timer interrupt handler for PIT/HPET
59 static irqreturn_t timer_interrupt(int irq, void *dev_id)
61 /* Keep nmi watchdog up to date */
62 inc_irq_stat(irq0_irqs);
64 /* Optimized out for !IO_APIC and x86_64 */
67  * Subtle, when I/O APICs are used we have to ack timer IRQ
68  * manually to deassert NMI lines for the watchdog if run
69  * on an 82489DX-based system.
/* Poll command (OCW3, 0x0c) to the master PIC acks the IRQ under the
 * 8259A lock; with AEOI mode no explicit EOI is needed. */
71 spin_lock(&i8259A_lock);
72 outb(0x0c, PIC_MASTER_OCW3);
73 /* Ack the IRQ; AEOI will end it automatically. */
75 spin_unlock(&i8259A_lock);
/* Hand the tick to whichever clock event device (PIT or HPET) is
 * registered as the global one. */
78 global_clock_event->event_handler(global_clock_event);
80 /* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */
82 outb_p(inb_p(0x61)| 0x80, 0x61);
/* NOTE(review): the surrounding MCA_bus conditional, the IRQ_HANDLED
 * return and the closing brace are not visible in this chunk. */
88 * calibrate_cpu is used on systems with fixed rate TSCs to determine
/* Number of TSC ticks to accumulate before computing the ratio. */
91 #define TICK_COUNT 100000000
/*
 * Measures cpu_khz by counting a hardware perf event against the TSC:
 * programs a free AMD K7-family performance counter, spins until
 * TICK_COUNT TSC cycles elapse, then scales the event count by tsc_khz.
 * Returns the computed CPU frequency in kHz.
 */
92 unsigned long __init calibrate_cpu(void)
94 int tsc_start, tsc_now;
96 unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
/* Find the first of the four K7 counters not claimed by the NMI
 * watchdog; i == 4 afterwards means none was free. */
99 for (i = 0; i < 4; i++)
100 if (avail_to_resrv_perfctr_nmi_bit(i))
102 no_ctr_free = (i == 4);
104 WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
105 "cpu_khz value may be incorrect.\n");
/* No counter free: borrow counter 3, saving its state for restore. */
107 rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
108 wrmsrl(MSR_K7_EVNTSEL3, 0);
109 rdmsrl(MSR_K7_PERFCTR3, pmc3);
111 reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
112 reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
/* IRQs off so the measurement window isn't stretched by interrupts. */
114 local_irq_save(flags);
115 /* start measuring cycles, incrementing from 0 */
116 wrmsrl(MSR_K7_PERFCTR0 + i, 0);
/* bit 22 = enable, bits 17:16 = count in OS+user mode, event 0x76 —
 * presumably AMD "CPU clocks not halted"; confirm against the BKDG. */
117 wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
/* Spin until TICK_COUNT TSC cycles have passed (loop head and the
 * initial tsc_start read are not visible in this chunk). */
120 rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
121 tsc_now = get_cycles();
122 } while ((tsc_now - tsc_start) < TICK_COUNT);
124 local_irq_restore(flags);
/* Restore borrowed counter 3 state / release the reserved counter
 * (branch structure not fully visible in this chunk). */
126 wrmsrl(MSR_K7_EVNTSEL3, 0);
127 wrmsrl(MSR_K7_PERFCTR3, pmc3);
128 wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
130 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
131 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
/* events * (tsc_khz / tsc_ticks) == events per millisecond == kHz. */
134 return pmc_now * tsc_khz / (tsc_now - tsc_start);
/*
 * irqaction for IRQ0: no CPU balancing, polled during IRQ storms
 * (IRQF_IRQPOLL), and marked as the timer interrupt (IRQF_TIMER).
 * NOTE(review): the .name field and closing brace are not visible in
 * this chunk.
 */
137 static struct irqaction irq0 = {
138 .handler = timer_interrupt,
139 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
/*
 * Install the default timer irqaction, pinned to CPU 0.
 * NOTE(review): the setup_irq() call and closing brace are not visible
 * in this chunk.
 */
143 void __init setup_default_timer_irq(void)
145 irq0.mask = cpumask_of_cpu(0);
149 /* Default timer init function */
/*
 * Registers the default timer interrupt handler; presumably also tries
 * HPET first and falls back to the PIT (those lines are not visible in
 * this chunk — confirm against the full file).
 */
150 void __init hpet_time_init(void)
154 setup_default_timer_irq();
/* Deferred timer init: dispatch through the x86_init platform hook so
 * subarch code can override the timer setup. */
157 static void x86_late_time_init(void)
159 x86_init.timers.timer_init();
163 * Initialize TSC and delay the periodic timer init to
164 * late x86_late_time_init() so ioremap works.
/* Only schedules the real work: actual timer bring-up happens once
 * late_time_init runs, after ioremap is available. */
166 void __init time_init(void)
169 late_time_init = x86_late_time_init;