#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>

unsigned int cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !cpu_has_tsc processors */
static int tsc_disabled = -1;

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	u64 this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(tsc_disabled)) {
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
	}

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			"cannot disable TSC completely.\n");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

#define MAX_RETRIES	5
#define SMI_TRESHOLD	50000
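
/*
 * SMI_TRESHOLD is in TSC cycles: if the two TSC reads that bracket a
 * reference counter read in tsc_read_refs() are further apart than
 * this, the sample was most likely disturbed by an SMI and is retried.
 */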

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 __init tsc_read_refs(u64 *pm, u64 *hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*pm = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_TRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}

/**
 * tsc_calibrate - calibrate the tsc on boot
 */
static unsigned int __init tsc_calibrate(void)
{
	unsigned long flags;
	u64 tsc1, tsc2, tr1, tr2, delta, pm1, pm2, hpet1, hpet2;
	int hpet = is_hpet_enabled();
	unsigned int tsc_khz_val = 0;

	local_irq_save(flags);

	tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);

	/* Enable the PIT channel 2 gate, disable the speaker output: */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/* Program PIT channel 2, mode 0, binary, for a ~50 ms countdown: */
	outb(0xb0, 0x43);
	outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
	tr1 = get_cycles();
	/* Busy-wait until the channel 2 output goes high at terminal count: */
	while ((inb(0x61) & 0x20) == 0);
	tr2 = get_cycles();

	tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);

	local_irq_restore(flags);
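
	/*
	 * Worked example (illustrative numbers, not from the source): the
	 * gate above is open for 50 ms, so a 2 GHz TSC advances
	 * tr2 - tr1 ~= 10^8 cycles while it counts down; 10^8 cycles / 50
	 * = 2 * 10^6 cycles per ms, i.e. tsc_khz_val = 2000000 for a
	 * 2 GHz part.
	 */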

	/*
	 * Preset the result with the raw and inaccurate PIT
	 * calibration value
	 */
	delta = (tr2 - tr1);
	do_div(delta, 50);
	tsc_khz_val = delta;

	/* hpet or pmtimer available ? */
	if (!hpet && !pm1 && !pm2) {
		printk(KERN_INFO "TSC calibrated against PIT\n");
		goto out;
	}

	/* Check, whether the sampling was disturbed by an SMI */
	if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) {
		printk(KERN_WARNING "TSC calibration disturbed by SMI, "
				"using PIT calibration result\n");
		goto out;
	}
	tsc2 = (tsc2 - tsc1) * 1000000LL;

	if (hpet) {
		printk(KERN_INFO "TSC calibrated against HPET\n");
		if (hpet2 < hpet1)
			hpet2 += 0x100000000ULL;
		hpet2 -= hpet1;
		/* HPET_PERIOD is in femtoseconds; convert ticks to ns: */
		tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
		do_div(tsc1, 1000000);
	} else {
		printk(KERN_INFO "TSC calibrated against PM_TIMER\n");
		if (pm2 < pm1)
			pm2 += (u64)ACPI_PM_OVRRUN;
		pm2 -= pm1;
		/* convert PM timer ticks (3.579545 MHz) to ns: */
		tsc1 = pm2 * 1000000000LL;
		do_div(tsc1, PMTMR_TICKS_PER_SEC);
	}

	do_div(tsc2, tsc1);
	tsc_khz_val = tsc2;

out:
	return tsc_khz_val;
}

unsigned long native_calculate_cpu_khz(void)
{
	return tsc_calibrate();
}

#ifdef CONFIG_X86_32
/* Only called from the Powernow K7 cpu freq driver */
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#endif /* CONFIG_X86_32 */

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
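
/*
 * Worked instance of the scaling above (illustrative, not from the
 * source): with CYC2NS_SCALE_FACTOR = 10 (SC = 2^10 = 1024) and a
 * 2 GHz CPU (cpu_khz = 2000000):
 *
 *	cyc2ns_scale = 10^6 * 1024 / 2000000 = 512
 *	ns = cycles * 512 >> 10 = cycles / 2
 *
 * which matches the expected 0.5 ns per cycle at 2 GHz.
 */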

DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	unsigned long flags, *scale;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	if (cpu_khz)
		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;

	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyroads.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;
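
/*
 * The notifier below rescales by simple ratios:
 * cpufreq_scale(old, ref, new) ~= old * new / ref. E.g. scaling from
 * ref_freq = 2000000 kHz down to freq->new = 1000000 kHz halves both
 * tsc_khz and loops_per_jiffy. (Illustrative numbers, not from the
 * source.)
 */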
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
			(val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz_ref, freq->cpu);

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */

/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 */
static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles();

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}

#ifdef CONFIG_X86_64
static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret = (cycle_t)vget_cycles();

	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
		ret : __vsyscall_gtod_data.clock.cycle_last;
}
#endif

static struct clocksource clocksource_tsc = {
	.name		= "tsc",
	.rating		= 300,
	.read		= read_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
	.vread		= vread_tsc,
#endif
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
			d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};

/*
 * Geode_LX - the OLPC CPU has possibly a very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP	0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}

	return tsc_unstable;
}

static void __init init_tsc_clocksource(void)
{
	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
			clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}
	clocksource_register(&clocksource_tsc);
}
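
/*
 * Worked instance of the mult/shift setup above (illustrative, not
 * from the source): clocksource_khz2mult() picks mult so that
 * ns = (cycles * mult) >> shift. With shift = 22 and tsc_khz = 2000000
 * (2 GHz): mult = 10^6 * 2^22 / 2000000 = 2097152, and
 * (cycles * 2097152) >> 22 = cycles / 2, i.e. 0.5 ns per cycle.
 */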

void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	if (!cpu_has_tsc)
		return;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		return;
	}

#ifdef CONFIG_X86_64
	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
			(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
		cpu_khz = calibrate_cpu();
#endif

	/* cycles per jiffy = cycles per second / HZ: */
	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	printk("Detected %lu.%03lu MHz processor.\n",
			(unsigned long)cpu_khz / 1000,
			(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */
	tsc_disabled = 0;

	use_tsc_delay();

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_geode_tsc_reliable();
	init_tsc_clocksource();
}