/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>

/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
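
/*
 * Reader-side sketch (added for illustration; not part of the original
 * file): every accessor below samples time under the seqlock retry
 * loop, while update_wall_time() and do_settimeofday() take the write
 * side with interrupts disabled:
 *
 *	unsigned long seq;
 *
 *	do {
 *		seq = read_seqbegin(&xtime_lock);
 *		... sample xtime, wall_to_monotonic, clock ...
 *	} while (read_seqretry(&xtime_lock, seq));
 */
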
/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the monotonic
 * time not to jump. We need to add total_sleep_time to wall_to_monotonic
 * to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
	xtime_cache = xtime;
	timespec_add_ns(&xtime_cache, nsec);
}
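
/*
 * Note (added for clarity): callers pass 0 when xtime itself was just
 * updated (do_settimeofday(), timekeeping_init(), timekeeping_resume())
 * and pass the not-yet-accumulated remainder, cyc2ns(clock, offset),
 * from update_wall_time(), making xtime_cache a cheap tick-granular
 * snapshot for get_seconds() and current_kernel_time().
 */
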
struct clocksource *clock;

/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
	xtime.tv_sec += leapsecond;
	wall_to_monotonic.tv_sec -= leapsecond;
	update_vsyscall(&xtime, clock);
}
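
/*
 * Caller sketch (added; based on the NTP leap second state machine):
 * the NTP code invokes this from its per-second processing while
 * xtime_lock is write-held, e.g.
 *
 *	timekeeping_leap_insert(-1);	... insert a leap second ...
 *	timekeeping_leap_insert(1);	... delete a leap second ...
 */
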
#ifdef CONFIG_GENERIC_TIME
/**
 * clocksource_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void clocksource_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 nsec;

	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = cyc2ns(clock, cycle_delta);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
	clock->raw_time.tv_nsec += nsec;
}
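
/*
 * Worked example (added; the numbers are hypothetical): cyc2ns() scales
 * cycles by (cycle_delta * clock->mult) >> clock->shift. With
 * mult = 1 << 22 and shift = 22 one cycle is exactly 1 ns, i.e. a 1 GHz
 * counter. The raw-time update above deliberately uses mult_orig, the
 * NTP-unadjusted multiplier, so the raw clock is never steered.
 */
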
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	cycle_t cycle_now, cycle_delta;
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;

		/* read clocksource: */
		cycle_now = clock->read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs = cyc2ns(clock, cycle_delta);

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
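
/*
 * Usage sketch (added for illustration): kernel code that wants wall
 * time at nanosecond resolution would call:
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	printk(KERN_DEBUG "now: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 */
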
ktime_t ktime_get(void)
{
	cycle_t cycle_now, cycle_delta;
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;

		/* read clocksource: */
		cycle_now = clock->read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs += cyc2ns(clock, cycle_delta);

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	cycle_t cycle_now, cycle_delta;
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;

		/* read clocksource: */
		cycle_now = clock->read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs = cyc2ns(clock, cycle_delta);

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);
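
/*
 * Usage sketch (added for illustration): note the truncation to
 * microseconds, which is why new callers should use getnstimeofday():
 *
 *	struct timeval tv;
 *
 *	do_gettimeofday(&tv);
 */
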
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time and update NTP and notify hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	clocksource_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	update_xtime_cache(0);

	clock->error = 0;
	ntp_clear();

	update_vsyscall(&xtime, clock);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
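
/*
 * Example (added for illustration; the value is arbitrary): setting the
 * wall clock moves wall_to_monotonic by the opposite delta, so
 * CLOCK_MONOTONIC readers are unaffected:
 *
 *	struct timespec ts = { .tv_sec = 1234567890, .tv_nsec = 0 };
 *
 *	if (do_settimeofday(&ts))
 *		... -EINVAL, tv_nsec was out of range ...
 */
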
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static void change_clocksource(void)
{
	struct clocksource *new, *old;

	new = clocksource_get_next();
	if (clock == new)
		return;

	clocksource_forward_now();

	if (new->enable && !new->enable(new))
		return;
	/*
	 * The frequency may have changed while the clocksource
	 * was disabled. If so the code in ->enable() must update
	 * the mult value to reflect the new frequency. Make sure
	 * mult_orig follows this change.
	 */
	new->mult_orig = new->mult;

	new->raw_time = clock->raw_time;
	old = clock;
	clock = new;
	/*
	 * Save mult_orig in mult so that the value can be restored
	 * regardless if ->enable() updates the value of mult or not.
	 */
	old->mult = old->mult_orig;
	if (old->disable)
		old->disable(old);

	clock->cycle_last = 0;
	clock->cycle_last = clock->read(clock);
	clock->error = 0;
	clock->xtime_nsec = 0;
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

	tick_clock_notify();
	/*
	 * We're holding xtime lock and waking up klogd would deadlock
	 * us on enqueue. So no printing!
	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
	       clock->name);
	 */
}
#else /* GENERIC_TIME */
static inline void clocksource_forward_now(void) { }
static inline void change_clocksource(void) { }

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
#endif /* !GENERIC_TIME */

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;
	cycle_t cycle_now, cycle_delta;

	do {
		seq = read_seqbegin(&xtime_lock);

		/* read clocksource: */
		cycle_now = clock->read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;

		*ts = clock->raw_time;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
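
/*
 * Usage sketch (added for illustration): the raw clock suits interval
 * measurements that must not be distorted by NTP slewing:
 *
 *	struct timespec start, end;
 *
 *	getrawmonotonic(&start);
 *	... measured work ...
 *	getrawmonotonic(&end);
 *	... interval = timespec_sub(end, start); ...
 */
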
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);
		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
	return 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;
	unsigned long sec = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_get_next();
	if (clock->enable)
		clock->enable(clock);
	/* set mult_orig on enable */
	clock->mult_orig = clock->mult;

	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
	clock->cycle_last = clock->read(clock);

	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	update_xtime_cache(0);
	total_sleep_time = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long now = read_persistent_clock();

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (now && (now > timekeeping_suspend_time)) {
		unsigned long sleep_length = now - timekeeping_suspend_time;

		xtime.tv_sec += sleep_length;
		wall_to_monotonic.tv_sec -= sleep_length;
		total_sleep_time += sleep_length;
	}
	update_xtime_cache(0);
	/* re-base the last cycle value */
	clock->cycle_last = clock->read(clock);
	clock->error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	timekeeping_suspend_time = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);
	clocksource_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);
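
/*
 * Note (added; the sysfs path is the conventional sysdev layout and is
 * stated as an assumption): the registrations above create
 * /sys/devices/system/timekeeping/timekeeping0, and hooking the class
 * resume/suspend callbacks orders timekeeping relative to the other
 * sysdevs across a suspend/resume cycle.
 */
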
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error. The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here. This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) <<
			(NTP_SCALE_SHIFT - clock->shift);
}
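
/*
 * Worked example (added; values hypothetical): with clock->shift = 22
 * and NTP_SCALE_SHIFT = 32, the test value is clock->error >> 9. If
 * that exceeds one cycle_interval the multiplier is nudged by +1 (the
 * clock has been running slow) or -1 (fast); larger errors fall through
 * to clocksource_bigadjust() for a power-of-two sized correction.
 */
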
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		offset -= clock->cycle_interval;
		clock->cycle_last += clock->cycle_interval;

		clock->xtime_nsec += clock->xtime_interval;
		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		clock->raw_time.tv_nsec += clock->raw_interval;
		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
			clock->raw_time.tv_sec++;
		}

		/* accumulate error between NTP and clock interval */
		clock->error += tick_length;
		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in clocksource_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)clock->xtime_nsec < 0)) {
		s64 neg = -(s64)clock->xtime_nsec;
		clock->xtime_nsec = 0;
		clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
	}

	/* store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
	clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);

	update_xtime_cache(cyc2ns(clock, offset));

	/* check to see if there is a new clocksource to use */
	change_clocksource();
	update_vsyscall(&xtime, clock);
}
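
/*
 * Worked example (added; numbers hypothetical): with HZ = 1000 and
 * shift = 22, xtime_interval is about 1000000 << 22 shifted nanoseconds.
 * If three ticks were lost, offset covers four cycle_intervals and the
 * accumulation loop above simply runs four times, carrying whole seconds
 * into xtime.tv_sec whenever xtime_nsec crosses NSEC_PER_SEC << shift.
 */
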
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	set_normalized_timespec(ts,
		- (wall_to_monotonic.tv_sec + total_sleep_time),
		- wall_to_monotonic.tv_nsec);
}
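
/*
 * Worked example (added): at boot wall_to_monotonic.tv_sec = -T0, where
 * T0 is the boot wall time. Every second of suspend subtracts one from
 * wall_to_monotonic.tv_sec and adds one to total_sleep_time, so their
 * sum stays at -T0 and this function keeps returning the
 * (settimeofday-corrected) wall time of boot.
 */
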
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	ts->tv_sec += total_sleep_time;
}

unsigned long get_seconds(void)
{
	return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime_cache;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);