/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et. al.
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

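/*
 * Usage sketch (illustrative, hedged): interval measurements should use
 * the monotonic clock, which is unaffected by settimeofday(), while
 * ktime_get_real() is for wall-clock timestamps compared against
 * user-visible CLOCK_REALTIME values:
 *
 *	ktime_t t0 = ktime_get();
 *	... do work ...
 *	s64 delta_ns = ktime_to_ns(ktime_sub(ktime_get(), t0));
 */
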
/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

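/*
 * Usage sketch (illustrative): the timespec variant is handy when the
 * result is handed to code expecting sec/nsec pairs; the value is the
 * monotonic time, i.e. the realtime clock plus wall_to_monotonic:
 *
 *	struct timespec uptime;
 *	ktime_get_ts(&uptime);
 */
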
/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = current_kernel_time();
		tom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Helper function to check, whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_CALLBACK;
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
						    unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;

	new_cpu_base = &__get_cpu_var(hrtimer_bases);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}
EXPORT_SYMBOL_GPL(ktime_add_ns);

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}
EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

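/*
 * Example (illustrative sketch): on 32-bit, adding a scalar nsec value
 * of one second or more must be normalized into the sec/nsec pair,
 * which is what the do_div() path above does:
 *
 *	ktime_t kt = ktime_set(1, 0);		// 1.0 s
 *	kt = ktime_add_ns(kt, 1500000000ULL);	// -> 2 s, 500000000 ns
 */
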
/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc, dns;
	int sft = 0;

	dclc = dns = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
#endif /* BITS_PER_LONG >= 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

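/*
 * Example (illustrative sketch): a plain ktime_add() of two huge values
 * would wrap into a negative tv64; ktime_add_safe() clamps instead, so
 * an absurdly large relative timeout from user space cannot produce an
 * already-expired timer:
 *
 *	ktime_t t = ktime_add_safe(ktime_get(), ktime_set(KTIME_SEC_MAX, 0));
 *	// t is clamped to KTIME_SEC_MAX seconds instead of overflowing
 */
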
/*
 * Check, whether the timer is on the callback pending list
 */
static inline int hrtimer_cb_pending(const struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_PENDING;
}

/*
 * Remove a timer from the callback pending list
 */
static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
{
	list_del_init(&timer->cb_entry);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);

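/*
 * Example: high resolution mode can be disabled from the kernel command
 * line, e.g. when debugging suspected clock event hardware problems:
 *
 *	highres=off
 */
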
/**
 * hrtimer_high_res_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires;

	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(timer->expires, base->offset);
		if (expires.tv64 < cpu_base->expires_next.tv64)
			cpu_base->expires_next = expires;
	}

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(timer->expires, base->offset);
	int res;

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME, when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;
	return res;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		set_normalized_timespec(&realtime_offset,
					-wall_to_monotonic.tv_sec,
					-wall_to_monotonic.tv_nsec);
	} while (read_seqretry(&xtime_lock, seq));

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base);
	spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 0, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ON_ONCE(num_online_cpus() > 1);

	/* Retrigger the CPU local events: */
	retrigger_next_event(NULL);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}

/*
 * When High resolution timers are active, try to reprogram. Note that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {

		/* Timer is expired, act upon the callback mode */
		switch(timer->cb_mode) {
		case HRTIMER_CB_IRQSAFE_NO_RESTART:
			/*
			 * We can call the callback from here. No restart
			 * happens, so no danger of recursion
			 */
			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
			return 1;
		case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
			/*
			 * This is solely for the sched tick emulation with
			 * dynamic tick support to ensure that we do not
			 * restart the tick right on the edge and end up with
			 * the tick timer in the softirq ! The calling site
			 * takes care of this.
			 */
			return 1;
		case HRTIMER_CB_IRQSAFE:
		case HRTIMER_CB_SOFTIRQ:
			/*
			 * Move everything else into the softirq pending list !
			 */
			list_add_tail(&timer->cb_entry,
				      &base->cpu_base->cb_pending);
			timer->state = HRTIMER_STATE_PENDING;
			raise_softirq(HRTIMER_SOFTIRQ);
			return 1;
		default:
			BUG();
		}
	}
	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
static inline int hrtimer_reprogram(struct hrtimer *timer,
				    struct hrtimer_clock_base *base)
{
	return 0;
}

#endif /* CONFIG_HIGH_RES_TIMERS */

#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, timer->expires);

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	timer->expires = ktime_add_safe(timer->expires, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);

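/*
 * Example (illustrative sketch): a self-rearming periodic timer. If the
 * callback was delayed past several periods, hrtimer_forward() skips the
 * missed intervals and returns the overrun count, keeping the timer on
 * its original 10 ms grid. The callback name is made up for illustration:
 *
 *	static enum hrtimer_restart my_tick(struct hrtimer *timer)
 *	{
 *		hrtimer_forward(timer, timer->base->get_time(),
 *				ktime_set(0, 10 * NSEC_PER_MSEC));
 *		return HRTIMER_RESTART;
 *	}
 */
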
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
			    struct hrtimer_clock_base *base, int reprogram)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost) {
		/*
		 * Reprogram the clock event device. When the timer is already
		 * expired hrtimer_enqueue_reprogram has either called the
		 * callback or added it to the pending list and raised the
		 * softirq.
		 *
		 * This is a NOP for !HIGHRES
		 */
		if (reprogram && hrtimer_enqueue_reprogram(timer, base))
			return;

		base->first = &timer->node;
	}

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	/* High res. callback list. NOP for !HIGHRES */
	if (hrtimer_cb_pending(timer))
		hrtimer_remove_cb_pending(timer);
	else {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device, if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}

/**
 * hrtimer_start - (re)start a relative timer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}
	timer->expires = tim;

	timer_stats_hrtimer_set_start_info(timer);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 */
	enqueue_hrtimer(timer, new_base,
			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);

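/*
 * Example (illustrative sketch, placeholder names): restarting an
 * already queued timer is legal; the old instance is removed first and
 * the return value tells whether that happened:
 *
 *	ret = hrtimer_start(&my_timer, tim, HRTIMER_MODE_ABS);
 *	// ret == 0: timer was inactive, ret == 1: an active timer was requeued
 */
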
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

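/*
 * Example (illustrative sketch): hrtimer_cancel() may busy wait for a
 * running callback, so it must not be called while holding a lock the
 * callback also takes. In such contexts use the non-blocking variant
 * and handle the -1 return:
 *
 *	if (hrtimer_try_to_cancel(&my_timer) < 0)
 *		// callback is running; retry after dropping the lock
 */
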
/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = ktime_sub(timer->expires, base->get_time());
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			if (!base->first)
				continue;

			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = timer->expires.tv64;
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	INIT_LIST_HEAD(&timer->cb_entry);
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL_GPL(hrtimer_init);

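/*
 * Example (illustrative sketch): a typical timer life cycle. All names
 * are placeholders; the callback runs in hard interrupt or softirq
 * context depending on cb_mode:
 *
 *	static enum hrtimer_restart my_timeout(struct hrtimer *timer)
 *	{
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_timeout;
 *	hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
 *		      HRTIMER_MODE_REL);
 *	...
 *	hrtimer_cancel(&my_timer);
 */
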
/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
{
	spin_lock_irq(&cpu_base->lock);

	while (!list_empty(&cpu_base->cb_pending)) {
		enum hrtimer_restart (*fn)(struct hrtimer *);
		struct hrtimer *timer;
		int restart;

		timer = list_entry(cpu_base->cb_pending.next,
				   struct hrtimer, cb_entry);

		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart == HRTIMER_RESTART) {
			BUG_ON(hrtimer_active(timer));
			/*
			 * Enqueue the timer, allow reprogramming of the event
			 * device
			 */
			enqueue_hrtimer(timer, timer->base, 1);
		} else if (hrtimer_active(timer)) {
			/*
			 * If the timer was rearmed on another CPU, reprogram
			 * the event device.
			 */
			if (timer->base->first == &timer->node)
				hrtimer_reprogram(timer, timer->base);
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}

static void __run_hrtimer(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);

	fn = timer->function;
	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
		/*
		 * Used for scheduler timers, avoid lock inversion with
		 * rq->lock and tasklist_lock.
		 *
		 * These timers are required to deal with enqueue expiry
		 * themselves and are not allowed to migrate.
		 */
		spin_unlock(&cpu_base->lock);
		restart = fn(timer);
		spin_lock(&cpu_base->lock);
	} else
		restart = fn(timer);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
	 * reprogramming of the event hardware. This happens at the end of this
	 * function anyway.
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base, 0);
	}
	timer->state &= ~HRTIMER_STATE_CALLBACK;
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int i, raise = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			if (basenow.tv64 < timer->expires.tv64) {
				ktime_t expires;

				expires = ktime_sub(timer->expires,
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			/* Move softirq callbacks to the pending list */
			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				raise = 1;
				continue;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, 0))
			goto retry;
	}

	/* Raise softirq ? */
	if (raise)
		raise_softirq(HRTIMER_SOFTIRQ);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
}

#endif	/* CONFIG_HIGH_RES_TIMERS */

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		hrtimer_switch_to_hres();

	run_hrtimer_pending(cpu_base);
}

/*
 * Called from hardirq context every jiffy
 */
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
				     int index)
{
	struct rb_node *node;
	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];

	if (!base->first)
		return;

	if (base->get_softirq_time)
		base->softirq_time = base->get_softirq_time();

	spin_lock(&cpu_base->lock);

	while ((node = base->first)) {
		struct hrtimer *timer;

		timer = rb_entry(node, struct hrtimer, node);
		if (base->softirq_time.tv64 <= timer->expires.tv64)
			break;

		if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
			__remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
			list_add_tail(&timer->cb_entry,
				      &base->cpu_base->cb_pending);
			continue;
		}

		__run_hrtimer(timer);
	}
	spin_unlock(&cpu_base->lock);
}

void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	int i;

	if (hrtimer_hres_active())
		return;

	hrtimer_get_softirq_time(cpu_base);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		run_hrtimer_queue(cpu_base, i);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
#ifdef CONFIG_HIGH_RES_TIMERS
	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
#endif
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&t->timer, t->timer.expires, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = ktime_sub(timer->expires, timer->base->get_time());
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;

	hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
	t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		return 0;

	rmtp = (struct timespec __user *)restart->arg1;
	if (rmtp) {
		int ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			return ret;
	}

	/* The other values in restart are already filled in */
	return -ERESTART_RESTARTBLOCK;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;

	hrtimer_init(&t.timer, clockid, mode);
	t.timer.expires = timespec_to_ktime(*rqtp);
	if (do_nanosleep(&t, mode))
		return 0;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS)
		return -ERESTARTNOHAND;

	if (rmtp) {
		int ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			return ret;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->arg0 = (unsigned long) t.timer.base->index;
	restart->arg1 = (unsigned long) rmtp;
	restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
	restart->arg3 = t.timer.expires.tv64 >> 32;

	return -ERESTART_RESTARTBLOCK;
}

asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

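/*
 * Example (user space view): this syscall backs nanosleep(2); the sleep
 * ends up as a relative CLOCK_MONOTONIC hrtimer and is therefore immune
 * to wall clock changes:
 *
 *	struct timespec req = { .tv_sec = 0, .tv_nsec = 1500000 };
 *	nanosleep(&req, NULL);	// sleeps ~1.5 ms
 */
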
/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);
	lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

	INIT_LIST_HEAD(&cpu_base->cb_pending);
	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timer. Allow reprogramming of the event device
		 */
		enqueue_hrtimer(timer, new_base, 1);
	}
}

static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	tick_cancel_sched_timer(cpu);

	local_irq_disable();
	double_spin_lock(&new_base->lock, &old_base->lock,
			 smp_processor_id() < cpu);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	double_spin_unlock(&new_base->lock, &old_base->lock,
			   smp_processor_id() < cpu);
	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq, NULL);
#endif
}