/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */
#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL(timecounter_init);
/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc: Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}
u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL(timecounter_read);
u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;

	return nsec;
}
EXPORT_SYMBOL(timecounter_cyc2time);
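/*
 * Illustrative sketch (not part of this file): a driver with a free-running
 * 32-bit hardware counter could wire it into the timecounter API roughly as
 * below. The device read helper, shift value and clock rate are hypothetical;
 * only the cyclecounter/timecounter calls come from this API.
 *
 *	static cycle_t example_cc_read(const struct cyclecounter *cc)
 *	{
 *		return example_hw_read_counter();	// hypothetical MMIO read
 *	}
 *
 *	static struct cyclecounter example_cc = {
 *		.read	= example_cc_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 22,				// mult filled in below
 *	};
 *	static struct timecounter example_tc;
 *
 *	// in the driver's probe/init path:
 *	//	example_cc.mult = clocksource_hz2mult(rate_hz, example_cc.shift);
 *	//	timecounter_init(&example_tc, &example_cc,
 *	//			 ktime_to_ns(ktime_get_real()));
 *
 * Afterwards timecounter_read(&example_tc) returns a monotonically growing
 * nanosecond value, and timecounter_cyc2time() converts raw hardware time
 * stamps (e.g. from RX descriptors) into that same timeline.
 */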
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);
/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
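/*
 * Worked out: HZ >> 1 is half a second's worth of jiffies, and
 * NSEC_PER_SEC >> 4 is 1e9 / 16 = 62,500,000 ns = 0.0625 s. A watched
 * clocksource is therefore declared unstable once it deviates from the
 * watchdog by more than 62.5 ms across a single 0.5 s sampling interval
 * (roughly 12.5% drift), as checked in clocksource_watchdog() below.
 */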
static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}
static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	schedule_work(&watchdog_work);
}
static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}
/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs: clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	wdnow = watchdog->read(watchdog);
	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
				     watchdog->mult, watchdog->shift);
	watchdog_last = wdnow;

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			schedule_work(&watchdog_work);
			continue;
		}

		csnow = cs->read(cs);

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->wd_last = csnow;
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}
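/*
 * The deviation check above relies on clocksource_cyc2ns(), which (per
 * <linux/clocksource.h>) converts a cycle delta to nanoseconds as
 * (cycles * mult) >> shift; both the watchdog and each watched clocksource
 * are converted this way before their elapsed times are compared against
 * WATCHDOG_THRESHOLD.
 */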
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_last = watchdog->read(watchdog);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}
static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}
static void clocksource_resume_watchdog(void)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	clocksource_reset_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
static int clocksource_watchdog_kthread(void *data)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);

	mutex_lock(&clocksource_mutex);
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	mutex_unlock(&clocksource_mutex);
	return 0;
}
#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	mutex_lock(&clocksource_mutex);

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume();

	clocksource_resume_watchdog();

	mutex_unlock(&clocksource_mutex);
}
/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}
#ifdef CONFIG_GENERIC_TIME

static int finished_booting;
/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	struct clocksource *best, *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		timekeeping_notify(curr_clocksource);
	}
}
/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	finished_booting = 1;
	clocksource_select();
	return 0;
}
fs_initcall(clocksource_done_booting);
#else /* CONFIG_GENERIC_TIME */

static inline void clocksource_select(void) { }

#endif /* CONFIG_GENERIC_TIME */
/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place, where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}
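/*
 * Example of the resulting order (ratings are illustrative): with registered
 * clocksources rated 400, 300 and 100, the insertion above keeps the list as
 * 400 -> 300 -> 100, so list_first_entry() in clocksource_select() always
 * yields the highest-rated clocksource.
 */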
/**
 * clocksource_register - Used to install new clocksources
 * @cs: clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_select();
	clocksource_enqueue_watchdog(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);
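/*
 * Illustrative sketch (not part of this file): a minimal driver-side
 * clocksource definition and registration might look roughly like the
 * following. The read callback, mask width, shift and rate are hypothetical;
 * CLOCKSOURCE_MASK() and clocksource_khz2mult() are the usual helpers from
 * <linux/clocksource.h>.
 *
 *	static cycle_t example_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)example_hw_read_counter();	// hypothetical
 *	}
 *
 *	static struct clocksource example_clocksource = {
 *		.name	= "example",
 *		.rating	= 200,
 *		.read	= example_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 20,
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	// in the driver's init path:
 *	//	example_clocksource.mult = clocksource_khz2mult(rate_khz, 20);
 *	//	clocksource_register(&example_clocksource);
 */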
static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}
/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);
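/*
 * Note: the watchdog path above uses __clocksource_change_rating(cs, 0) to
 * demote an unstable clocksource; re-sorting the list with rating 0 pushes it
 * behind every healthy clocksource, so the next clocksource_select() will no
 * longer pick it.
 */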
/**
 * clocksource_unregister - remove a registered clocksource
 */
void clocksource_unregister(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_dequeue_watchdog(cs);
	list_del(&cs->list);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);
#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}
/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  const char *buf, size_t count)
{
	size_t ret = count;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;
	/* strip the trailing newline: */
	if (buf[count-1] == '\n')
		count--;

	mutex_lock(&clocksource_mutex);
	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;
	clocksource_select();
	mutex_unlock(&clocksource_mutex);

	return ret;
}
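/*
 * The attribute pair defined below ends up (with the default sysdev layout)
 * under /sys/devices/system/clocksource/clocksource0/, so the override can be
 * exercised from userspace roughly as:
 *
 *	cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	echo acpi_pm > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */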
/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};
static int __init init_clocksource_sysfs(void)
{
	int error = sysdev_class_register(&clocksource_sysclass);

	if (!error)
		error = sysdev_register(&device_clocksource);
	if (!error)
		error = sysdev_create_file(&device_clocksource,
					   &attr_current_clocksource);
	if (!error)
		error = sysdev_create_file(&device_clocksource,
					   &attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */
/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}
__setup("clocksource=", boot_override_clocksource);
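/*
 * For example, booting with "clocksource=acpi_pm" on the kernel command line
 * pre-loads override_name so that clocksource_select() prefers the ACPI PM
 * timer once it registers.
 */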
/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str: override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
			"Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
		"Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}
__setup("clock=", boot_override_clock);