#include <asm/uaccess.h>
+#include <trace/events/timer.h>
+
/*
* The timer bases:
*
for (;;) {
base = timer->base;
if (likely(base != NULL)) {
- spin_lock_irqsave(&base->cpu_base->lock, *flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
if (likely(base == timer->base))
return base;
/* The timer has migrated to another CPU: */
- spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+ raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
}
cpu_relax();
}
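
The loop above is the lock-and-revalidate idiom used by lock_hrtimer_base(): timer->base can change while the lock is not held, so after taking the per-CPU base lock we re-check that the timer still belongs to that base, and retry if it migrated in between. A minimal userspace sketch of the same idiom, with hypothetical names (struct home, lock_obj_home) and pthread mutexes standing in for the raw spinlocks; the kernel version additionally depends on memory-ordering guarantees this sketch glosses over:

    #include <pthread.h>
    #include <sched.h>

    /* Hypothetical analogue of lock_hrtimer_base(): an object may
     * migrate between "homes", each guarded by its own lock. */
    struct home { pthread_mutex_t lock; };
    struct obj  { struct home *home; /* may change while unlocked */ };

    static struct home *lock_obj_home(struct obj *o)
    {
        for (;;) {
            struct home *h = o->home;      /* racy snapshot */

            if (h) {
                pthread_mutex_lock(&h->lock);
                if (h == o->home)          /* revalidate under the lock */
                    return h;
                /* Migrated meanwhile: drop the stale lock and retry. */
                pthread_mutex_unlock(&h->lock);
            }
            sched_yield();                 /* cpu_relax() stand-in */
        }
    }
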
/* See the comment in lock_timer_base() */
timer->base = NULL;
- spin_unlock(&base->cpu_base->lock);
- spin_lock(&new_base->cpu_base->lock);
+ raw_spin_unlock(&base->cpu_base->lock);
+ raw_spin_lock(&new_base->cpu_base->lock);
if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
cpu = this_cpu;
- spin_unlock(&new_base->cpu_base->lock);
- spin_lock(&base->cpu_base->lock);
+ raw_spin_unlock(&new_base->cpu_base->lock);
+ raw_spin_lock(&base->cpu_base->lock);
timer->base = base;
goto again;
}
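
The migration path above is the other half of that protocol: timer->base is set to NULL before the locks are swapped, so any concurrent locker spins until the move is finished, and if the target CPU turns out to be unsuitable the timer is bounced back under the old lock. A hypothetical userspace sketch of just the pointer-NULLing handover (same caveat as before about memory ordering):

    #include <pthread.h>
    #include <stddef.h>

    struct home { pthread_mutex_t lock; };
    struct obj  { struct home *home; };

    /* Caller holds from->lock on entry; returns holding to->lock.
     * NULLing o->home first forces concurrent lockers to retry. */
    static void move_obj(struct obj *o, struct home *from, struct home *to)
    {
        o->home = NULL;                    /* lockers now spin */
        pthread_mutex_unlock(&from->lock);
        pthread_mutex_lock(&to->lock);
        o->home = to;                      /* visible again on 'to' */
    }
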
{
struct hrtimer_clock_base *base = timer->base;
- spin_lock_irqsave(&base->cpu_base->lock, *flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
return base;
}
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
+static inline void
+debug_init(struct hrtimer *timer, clockid_t clockid,
+ enum hrtimer_mode mode)
+{
+ debug_hrtimer_init(timer);
+ trace_hrtimer_init(timer, clockid, mode);
+}
+
+static inline void debug_activate(struct hrtimer *timer)
+{
+ debug_hrtimer_activate(timer);
+ trace_hrtimer_start(timer);
+}
+
+static inline void debug_deactivate(struct hrtimer *timer)
+{
+ debug_hrtimer_deactivate(timer);
+ trace_hrtimer_cancel(timer);
+}
+
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
* next event
* Called with interrupts disabled and base->lock held
*/
-static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
+static void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
int i;
struct hrtimer_clock_base *base = cpu_base->clock_base;
- ktime_t expires;
+ ktime_t expires, expires_next;
- cpu_base->expires_next.tv64 = KTIME_MAX;
+ expires_next.tv64 = KTIME_MAX;
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
struct hrtimer *timer;
*/
if (expires.tv64 < 0)
expires.tv64 = 0;
- if (expires.tv64 < cpu_base->expires_next.tv64)
- cpu_base->expires_next = expires;
+ if (expires.tv64 < expires_next.tv64)
+ expires_next = expires;
}
+ if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
+ return;
+
+ cpu_base->expires_next.tv64 = expires_next.tv64;
+
if (cpu_base->expires_next.tv64 != KTIME_MAX)
tick_program_event(cpu_base->expires_next, 1);
}
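
The skip_equal rework computes the earliest expiry across all clock bases into a local first, and only writes it back and touches the hardware when it actually changed. A standalone sketch of that min-then-compare shape, using hypothetical plain-C stand-ins for the kernel structures:

    #include <stdint.h>
    #include <stdbool.h>

    #define KTIME_MAX INT64_MAX

    /* Hypothetical stand-in for a per-clock base. */
    struct clock_base { int64_t first_expiry; /* KTIME_MAX if empty */ };

    /* Returns true when the clock-event device really needs to be
     * reprogrammed, mirroring the skip_equal short-circuit above. */
    static bool force_reprogram(const struct clock_base *bases, int n,
                                int64_t *cached_next, bool skip_equal)
    {
        int64_t next = KTIME_MAX;
        int i;

        for (i = 0; i < n; i++)
            if (bases[i].first_expiry < next)
                next = bases[i].first_expiry;

        if (skip_equal && next == *cached_next)
            return false;                  /* first expiry unchanged */

        *cached_next = next;
        return next != KTIME_MAX;          /* program only if armed */
    }

The removal path further down passes skip_equal=1 precisely so this comparison can turn a timer removal into a hardware no-op.
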
static int hrtimer_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
- ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
int res;
if (expires.tv64 < 0)
return -ETIME;
- if (expires.tv64 >= expires_next->tv64)
+ if (expires.tv64 >= cpu_base->expires_next.tv64)
+ return 0;
+
+ /*
+ * If a hang was detected in the last timer interrupt then we
+ * do not schedule a timer earlier than the expiry we enforced
+ * during hang detection. We want the system to make progress.
+ */
+ if (cpu_base->hang_detected)
return 0;
/*
*/
res = tick_program_event(expires, 0);
if (!IS_ERR_VALUE(res))
- *expires_next = expires;
+ cpu_base->expires_next = expires;
return res;
}
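
Note the order of the guards: the hang_detected test sits after the "is an earlier event already programmed?" check, so while the enforced hang delay is in force an ordinary re-arm reports success without pulling the programmed event back in. A hypothetical condensation of that ordering:

    #include <stdint.h>

    /* Each early return fires before the hardware is touched. */
    static int reprogram(int64_t expires, int64_t programmed_next,
                         int hang_detected)
    {
        if (expires < 0)
            return -1;                     /* in the past: -ETIME */
        if (expires >= programmed_next)
            return 0;                      /* earlier event pending */
        if (hang_detected)
            return 0;                      /* keep the hang backoff */
        /* ... program the clock-event device with 'expires' ... */
        return 0;
    }
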
base = &__get_cpu_var(hrtimer_bases);
/* Adjust CLOCK_REALTIME offset */
- spin_lock(&base->lock);
+ raw_spin_lock(&base->lock);
base->clock_base[CLOCK_REALTIME].offset =
timespec_to_ktime(realtime_offset);
- hrtimer_force_reprogram(base);
- spin_unlock(&base->lock);
+ hrtimer_force_reprogram(base, 0);
+ raw_spin_unlock(&base->lock);
}
/*
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
if (wakeup) {
- spin_unlock(&base->cpu_base->lock);
+ raw_spin_unlock(&base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- spin_lock(&base->cpu_base->lock);
+ raw_spin_lock(&base->cpu_base->lock);
} else
__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
/* "Retrigger" the interrupt to get things going */
retrigger_next_event(NULL);
local_irq_restore(flags);
- printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
- smp_processor_id());
return 1;
}
static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
-static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
+static inline void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base,
int wakeup)
#endif /* CONFIG_HIGH_RES_TIMERS */
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
+static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
+#ifdef CONFIG_TIMER_STATS
if (timer->start_site)
return;
-
- timer->start_site = addr;
+ timer->start_site = __builtin_return_address(0);
memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
timer->start_pid = current->pid;
+#endif
}
+
+static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+ timer->start_site = NULL;
#endif
+}
+
+static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+ if (likely(!timer_stats_active))
+ return;
+ timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+ timer->function, timer->start_comm, 0);
+#endif
+}
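
Moving the #ifdef inside the static inline helpers keeps every call site unconditional: with CONFIG_TIMER_STATS disabled each helper collapses to an empty inline the compiler removes. And because the helper is inlined, the GCC/Clang builtin __builtin_return_address(0) evaluates in the caller's frame, so the recorded start_site is the code that armed the timer. The shape of the idiom, with a hypothetical CONFIG_FOO_STATS symbol:

    /* Hypothetical config symbol; this shows the idiom, not the
     * kernel's actual structures. */
    #define CONFIG_FOO_STATS

    struct foo {
    #ifdef CONFIG_FOO_STATS
        void *start_site;
    #endif
    };

    static inline void foo_stats_set_start_info(struct foo *f)
    {
    #ifdef CONFIG_FOO_STATS
        /* Inlined into the caller, so this records the arming site. */
        if (!f->start_site)
            f->start_site = __builtin_return_address(0);
    #endif
    }
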
/*
* Counterpart to lock_hrtimer_base above:
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
- spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+ raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
struct hrtimer *entry;
int leftmost = 1;
- debug_hrtimer_activate(timer);
+ debug_activate(timer);
/*
* Find the right place in the rbtree:
struct hrtimer_clock_base *base,
unsigned long newstate, int reprogram)
{
- if (timer->state & HRTIMER_STATE_ENQUEUED) {
- /*
- * Remove the timer from the rbtree and replace the
- * first entry pointer if necessary.
- */
- if (base->first == &timer->node) {
- base->first = rb_next(&timer->node);
- /* Reprogram the clock event device. if enabled */
- if (reprogram && hrtimer_hres_active())
- hrtimer_force_reprogram(base->cpu_base);
+ if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+ goto out;
+
+ /*
+ * Remove the timer from the rbtree and replace the first
+ * entry pointer if necessary.
+ */
+ if (base->first == &timer->node) {
+ base->first = rb_next(&timer->node);
+#ifdef CONFIG_HIGH_RES_TIMERS
+ /* Reprogram the clock event device, if enabled */
+ if (reprogram && hrtimer_hres_active()) {
+ ktime_t expires;
+
+ expires = ktime_sub(hrtimer_get_expires(timer),
+ base->offset);
+ if (base->cpu_base->expires_next.tv64 == expires.tv64)
+ hrtimer_force_reprogram(base->cpu_base, 1);
}
- rb_erase(&timer->node, &base->active);
+#endif
}
+ rb_erase(&timer->node, &base->active);
+out:
timer->state = newstate;
}
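
The rework only forces a reprogram when the removed timer was the leftmost node and its offset-adjusted expiry is exactly what was programmed into the device; combined with skip_equal above, removing any other timer never touches the clock-event hardware. A condensed, hypothetical form of that test (ktime_sub() modeled as plain 64-bit subtraction):

    #include <stdint.h>
    #include <stdbool.h>

    static bool removal_needs_reprogram(int64_t timer_expires,
                                        int64_t base_offset,
                                        int64_t programmed_next)
    {
        /* Only if the device was programmed with exactly this
         * timer's expiry can its removal move the next event. */
        return (timer_expires - base_offset) == programmed_next;
    }
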
* reprogramming happens in the interrupt handler. This is a
* rare case and less expensive than a smp call.
*/
- debug_hrtimer_deactivate(timer);
+ debug_deactivate(timer);
timer_stats_hrtimer_clear_start_info(timer);
reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
unsigned long flags;
int i;
- spin_lock_irqsave(&cpu_base->lock, flags);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!hrtimer_hres_active()) {
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
}
}
- spin_unlock_irqrestore(&cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
if (mindelta.tv64 < 0)
mindelta.tv64 = 0;
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
- debug_hrtimer_init(timer);
+ debug_init(timer, clock_id, mode);
__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
-static void __run_hrtimer(struct hrtimer *timer)
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
struct hrtimer_clock_base *base = timer->base;
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
WARN_ON(!irqs_disabled());
- debug_hrtimer_deactivate(timer);
+ debug_deactivate(timer);
__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
timer_stats_account_hrtimer(timer);
fn = timer->function;
* they get migrated to another cpu, therefore it's safe to unlock
* the timer base.
*/
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
+ trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
- spin_lock(&cpu_base->lock);
+ trace_hrtimer_expire_exit(timer);
+ raw_spin_lock(&cpu_base->lock);
/*
* Note: We clear the CALLBACK bit after enqueue_hrtimer and
#ifdef CONFIG_HIGH_RES_TIMERS
-static int force_clock_reprogram;
-
-/*
- * After 5 iteration's attempts, we consider that hrtimer_interrupt()
- * is hanging, which could happen with something that slows the interrupt
- * such as the tracing. Then we force the clock reprogramming for each future
- * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
- * threshold that we will overwrite.
- * The next tick event will be scheduled to 3 times we currently spend on
- * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
- * 1/4 of their time to process the hrtimer interrupts. This is enough to
- * let it running without serious starvation.
- */
-
-static inline void
-hrtimer_interrupt_hanging(struct clock_event_device *dev,
- ktime_t try_time)
-{
- force_clock_reprogram = 1;
- dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
- printk(KERN_WARNING "hrtimer: interrupt too slow, "
- "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
-}
/*
* High resolution timer interrupt
* Called with interrupts disabled
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
- ktime_t expires_next, now;
- int nr_retries = 0;
- int i;
+ ktime_t expires_next, now, entry_time, delta;
+ int i, retries = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
dev->next_event.tv64 = KTIME_MAX;
- retry:
- /* 5 retries is enough to notice a hang */
- if (!(++nr_retries % 5))
- hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
-
- now = ktime_get();
-
+ entry_time = now = ktime_get();
+retry:
expires_next.tv64 = KTIME_MAX;
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
/*
* We set expires_next to KTIME_MAX here with cpu_base->lock
* held to prevent that a timer is enqueued in our queue via
break;
}
- __run_hrtimer(timer);
+ __run_hrtimer(timer, &basenow);
}
base++;
}
* against it.
*/
cpu_base->expires_next = expires_next;
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
/* Reprogramming necessary ? */
- if (expires_next.tv64 != KTIME_MAX) {
- if (tick_program_event(expires_next, force_clock_reprogram))
- goto retry;
+ if (expires_next.tv64 == KTIME_MAX ||
+ !tick_program_event(expires_next, 0)) {
+ cpu_base->hang_detected = 0;
+ return;
}
+
+ /*
+ * The next timer had already expired due to:
+ * - tracing
+ * - long lasting callbacks
+ * - being scheduled away when running in a VM
+ *
+ * We must avoid looping forever in the hrtimer interrupt
+ * routine, so we give it 3 attempts to avoid overreacting
+ * to a spurious event.
+ */
+ now = ktime_get();
+ cpu_base->nr_retries++;
+ if (++retries < 3)
+ goto retry;
+ /*
+ * Give the system a chance to do something other than looping
+ * here. We stored the entry time, so we know exactly how long
+ * we spent here. We schedule the next event this amount of
+ * time away.
+ */
+ cpu_base->nr_hangs++;
+ cpu_base->hang_detected = 1;
+ delta = ktime_sub(now, entry_time);
+ if (delta.tv64 > cpu_base->max_hang_time.tv64)
+ cpu_base->max_hang_time = delta;
+ /*
+ * Limit the enforced delay to a sensible value: back off by the
+ * time we just spent in the handler, but by no more than 100ms.
+ */
+ if (delta.tv64 > 100 * NSEC_PER_MSEC)
+ expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
+ else
+ expires_next = ktime_add(now, delta);
+ tick_program_event(expires_next, 1);
+ printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
+ ktime_to_ns(delta));
}
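
Taken together, the new tail of hrtimer_interrupt() replaces the old "force the reprogram forever" scheme: after three passes that still find the next event already expired, it records the hang, measures delta = now - entry_time, and programs the next event that far in the future, capped at 100ms. Since the backoff equals the time just spent, the CPU ends up spending at most roughly half its time in the handler. A self-contained sketch of just that backoff arithmetic (hypothetical helper name):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_MSEC 1000000LL

    /* Hypothetical distillation of the hang backoff above. */
    static int64_t hang_backoff_expiry(int64_t entry_ns, int64_t now_ns)
    {
        int64_t delta = now_ns - entry_ns; /* time spent looping */

        if (delta > 100 * NSEC_PER_MSEC)   /* cap the enforced delay */
            delta = 100 * NSEC_PER_MSEC;
        return now_ns + delta;             /* push the next event out */
    }

    int main(void)
    {
        /* The handler looped for 8ms: back the next event off by 8ms. */
        printf("%lld\n",
               (long long)hang_backoff_expiry(0, 8 * NSEC_PER_MSEC));
        return 0;
    }
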
/*
gettime = 0;
}
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
while ((node = base->first)) {
struct hrtimer *timer;
hrtimer_get_expires_tv64(timer))
break;
- __run_hrtimer(timer);
+ __run_hrtimer(timer, &base->softirq_time);
}
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
}
}
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
- spin_lock_init(&cpu_base->lock);
+ raw_spin_lock_init(&cpu_base->lock);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
cpu_base->clock_base[i].cpu_base = cpu_base;
while ((node = rb_first(&old_base->active))) {
timer = rb_entry(node, struct hrtimer, node);
BUG_ON(hrtimer_callback_running(timer));
- debug_hrtimer_deactivate(timer);
+ debug_deactivate(timer);
/*
* Mark it as STATE_MIGRATE not INACTIVE otherwise the
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- spin_lock(&new_base->lock);
- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&new_base->lock);
+ raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i]);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
/* Check, if we got expired work to do */
__hrtimer_peek_ahead_timers();