/*
 * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
 * which was originally moved from arch/i386/kernel/time.c.
 * See comments there for proper credits.
 */

#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>

#include "mach_timer.h"

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;

int tsc_disable;

unsigned long long (*custom_sched_clock)(void);

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			    "cannot disable TSC.\n");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

static inline int check_tsc_unstable(void)
{
	return tsc_unstable;
}

void mark_tsc_unstable(void)
{
	tsc_unstable = 1;
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

static unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
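
/*
 * Worked example of the scaling math above (figures are illustrative,
 * not taken from real hardware): on a hypothetical 2GHz CPU,
 * cpu_khz = 2000000, so set_cyc2ns_scale() stores
 * cyc2ns_scale = (1000000 << 10) / 2000000 = 512.  A call such as
 * cycles_2_ns(4000) then yields (4000 * 512) >> 10 = 2000ns, i.e.
 * 0.5ns per cycle, exactly the period of a 2GHz clock.
 */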

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	unsigned long long this_offset;

	if (unlikely(custom_sched_clock))
		return (*custom_sched_clock)();

	/*
	 * Fall back to jiffies if there's no TSC available:
	 */
	if (unlikely(tsc_disable))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}
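
/*
 * A quick check of the fallback arithmetic (assuming, purely for
 * illustration, HZ = 250): each jiffy then covers
 * 1000000000 / 250 = 4000000ns, so 2500 elapsed jiffies map to
 * 10^10 ns, i.e. 10 seconds, at jiffy (not TSC) resolution.
 */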

static unsigned long calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count, flags;
	u64 delta64;
	int i;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);
	}
	/*
	 * Error: ECTCNEVERSET
	 * The CTC wasn't reliable: we got a hit on the very first read,
	 * or the CPU was so fast/slow that the quotient wouldn't fit in
	 * 32 bits:
	 */
	if (count <= 1)
		goto err;

	delta64 = end - start;

	/* cpu freq too fast: */
	if (delta64 > (1ULL<<32))
		goto err;
	/* cpu freq too slow: */
	if (delta64 <= CALIBRATE_TIME_MSEC)
		goto err;

	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;
err:
	local_irq_restore(flags);
	return 0;
}
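
/*
 * The arithmetic in calculate_cpu_khz(), with illustrative numbers
 * (assuming a 30ms calibration window; see CALIBRATE_TIME_MSEC in
 * mach_timer.h for the real value): a hypothetical 2GHz CPU counts
 * about 60000000 cycles while the PIT counts down 30ms, and
 * 60000000 / 30 = 2000000 cycles per msec, which is cpu_khz.
 */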

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data[0].loops_per_jiffy =
			cpufreq_scale(cpu_data[0].loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	}
	return -ENODEV;
#else
	return -ENODEV;
#endif
}
EXPORT_SYMBOL(recalibrate_cpu_khz);

void __init tsc_init(void)
{
	if (!cpu_has_tsc || tsc_disable)
		goto out_no_tsc;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz)
		goto out_no_tsc;

	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
	       (unsigned long)cpu_khz % 1000);

	set_cyc2ns_scale(cpu_khz);
	use_tsc_delay();
	return;

out_no_tsc:
	/*
	 * Set the tsc_disable flag if there's no TSC support; this
	 * makes it a fast flag for the kernel to see whether it
	 * should be using the TSC.
	 */
	tsc_disable = 1;
}

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;
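
/*
 * cpufreq_scale() rescales linearly: scaled = ref * new_freq / ref_freq.
 * With purely illustrative figures, a loops_per_jiffy_ref of 4000000
 * calibrated at ref_freq = 2000000 (kHz) becomes
 * 4000000 * 1000000 / 2000000 = 2000000 when the governor drops the
 * core to 1000000kHz, halving the delay loop along with the clock.
 */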

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
		write_seqlock_irq(&xtime_lock);

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			goto end;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data[freq->cpu].loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
						ref_freq, freq->new);

		if (cpu_khz) {
			if (num_online_cpus() == 1)
				cpu_khz = cpufreq_scale(cpu_khz_ref,
						ref_freq, freq->new);
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				tsc_khz = cpu_khz;
				set_cyc2ns_scale(cpu_khz);
				/*
				 * TSC based sched_clock turns
				 * to junk w/ cpufreq
				 */
				mark_tsc_unstable();
			}
		}
	}
end:
	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
		write_sequnlock_irq(&xtime_lock);

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz = 0;
static int tsc_update_callback(void);

static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

	return ret;
}

static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.update_callback	= tsc_update_callback,
	.is_continuous		= 1,
};
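
/*
 * How mult and shift combine (illustrative numbers): the clocksource
 * core computes ns = (cycles * mult) >> shift, and
 * clocksource_khz2mult() picks mult = (1000000 << shift) / khz.
 * For a hypothetical tsc_khz of 2000000 with shift = 22 that gives
 * mult = 2097152, so (cycles * 2097152) >> 22 = cycles / 2, matching
 * the 0.5ns period of a 2GHz TSC.
 */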

static int tsc_update_callback(void)
{
	int change = 0;

	/* check to see if we should switch to the safe clocksource: */
	if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
		clocksource_change_rating(&clocksource_tsc, 0);
		change = 1;
	}

	/* only update if tsc_khz has changed: */
	if (current_tsc_khz != tsc_khz) {
		current_tsc_khz = tsc_khz;
		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
							clocksource_tsc.shift);
		change = 1;
	}

	return change;
}

static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	mark_tsc_unstable();
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
	 .callback = dmi_mark_tsc_unstable,
	 .ident = "IBM Thinkpad 380XD",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		     },
	 },
	 {}
};

#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10sec in MS */
static struct timer_list verify_tsc_freq_timer;

/* XXX - Probably should add locking */
static void verify_tsc_freq(unsigned long unused)
{
	static u64 last_tsc;
	static unsigned long last_jiffies;

	u64 now_tsc, interval_tsc;
	unsigned long now_jiffies, interval_jiffies;

	if (check_tsc_unstable())
		return;

	rdtscll(now_tsc);
	now_jiffies = jiffies;

	if (!last_jiffies)
		goto out;

	interval_jiffies = now_jiffies - last_jiffies;
	interval_tsc = now_tsc - last_tsc;
	interval_tsc *= HZ;
	do_div(interval_tsc, cpu_khz*1000);

	if (interval_tsc < (interval_jiffies * 3 / 4)) {
		printk("TSC appears to be running slowly. "
			"Marking it as unstable\n");
		mark_tsc_unstable();
	}

out:
	last_tsc = now_tsc;
	last_jiffies = now_jiffies;
	/* set us up to go off on the next interval: */
	mod_timer(&verify_tsc_freq_timer,
		jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
}
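
/*
 * The conversion above turns a TSC delta back into jiffies:
 * cycles * HZ / (cpu_khz * 1000) = elapsed-seconds * HZ.  With
 * illustrative numbers (HZ = 250, cpu_khz = 2000000), a healthy TSC
 * accumulates 2*10^10 cycles over the 10s check interval, and
 * 2*10^10 * 250 / (2*10^9) = 2500 jiffies, comfortably above 3/4 of
 * the 2500 jiffies that actually elapsed.
 */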

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
static __init int unsynchronized_tsc(void)
{
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return 0;

	/* assume multi socket systems are not synchronized: */
	return num_possible_cpus() > 1;
}

static int __init init_tsc_clocksource(void)
{
	if (cpu_has_tsc && tsc_khz && !tsc_disable) {
		/* check blacklist */
		dmi_check_system(bad_tsc_dmi_table);

		if (unsynchronized_tsc()) /* mark unstable if unsynced */
			mark_tsc_unstable();
		current_tsc_khz = tsc_khz;
		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
							clocksource_tsc.shift);
		/* lower the rating if we already know it's unstable: */
		if (check_tsc_unstable())
			clocksource_tsc.rating = 0;

		init_timer(&verify_tsc_freq_timer);
		verify_tsc_freq_timer.function = verify_tsc_freq;
		verify_tsc_freq_timer.expires =
			jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
		add_timer(&verify_tsc_freq_timer);

		return clocksource_register(&clocksource_tsc);
	}

	return 0;
}

module_init(init_tsc_clocksource);