[PATCH] swsusp: two simplifications
diff --git a/kernel/timer.c b/kernel/timer.c
index 8aadc62..6a2e5f8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -365,6 +365,34 @@ int del_timer(struct timer_list *timer)
 EXPORT_SYMBOL(del_timer);
 
 #ifdef CONFIG_SMP
+/*
+ * This function tries to deactivate a timer. Upon successful (ret >= 0)
+ * exit the timer is not queued and the handler is not running on any CPU.
+ *
+ * It must not be called from interrupt contexts.
+ */
+int try_to_del_timer_sync(struct timer_list *timer)
+{
+       timer_base_t *base;
+       unsigned long flags;
+       int ret = -1;
+
+       base = lock_timer_base(timer, &flags);
+
+       if (base->running_timer == timer)
+               goto out;
+
+       ret = 0;
+       if (timer_pending(timer)) {
+               detach_timer(timer, 1);
+               ret = 1;
+       }
+out:
+       spin_unlock_irqrestore(&base->lock, flags);
+
+       return ret;
+}
+
 /***
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
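For context, a minimal caller sketch of the new try_to_del_timer_sync() (not part of the patch; example_dev and its poll_timer field are hypothetical):

	/*
	 * Hypothetical caller: deactivate a timer from a context that must
	 * not spin waiting for the handler, e.g. while holding a lock the
	 * handler itself takes.
	 */
	static int example_stop(struct example_dev *dev)
	{
		int ret = try_to_del_timer_sync(&dev->poll_timer);

		if (ret < 0)		/* handler running on another CPU */
			return -EAGAIN;	/* drop the lock and retry later */

		/* ret == 0: timer was idle; ret == 1: we deactivated it */
		return 0;
	}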
@@ -384,28 +412,13 @@ EXPORT_SYMBOL(del_timer);
  */
 int del_timer_sync(struct timer_list *timer)
 {
-       timer_base_t *base;
-       unsigned long flags;
-       int ret = -1;
-
        check_timer(timer);
 
-       do {
-               base = lock_timer_base(timer, &flags);
-
-               if (base->running_timer == timer)
-                       goto unlock;
-
-               ret = 0;
-               if (timer_pending(timer)) {
-                       detach_timer(timer, 1);
-                       ret = 1;
-               }
-unlock:
-               spin_unlock_irqrestore(&base->lock, flags);
-       } while (ret < 0);
-
-       return ret;
+       for (;;) {
+               int ret = try_to_del_timer_sync(timer);
+               if (ret >= 0)
+                       return ret;
+       }
 }
 
 EXPORT_SYMBOL(del_timer_sync);
@@ -476,10 +489,14 @@ static inline void __run_timers(tvec_base_t *base)
                        detach_timer(timer, 1);
                        spin_unlock_irq(&base->t_base.lock);
                        {
-                               u32 preempt_count = preempt_count();
+                               int preempt_count = preempt_count();
                                fn(data);
                                if (preempt_count != preempt_count()) {
-                                       printk("huh, entered %p with %08x, exited with %08x?\n", fn, preempt_count, preempt_count());
+                                       printk(KERN_WARNING "huh, entered %p "
+                                              "with preempt_count %08x, exited"
+                                              " with %08x?\n",
+                                              fn, preempt_count,
+                                              preempt_count());
                                        BUG();
                                }
                        }
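To illustrate what the check above catches (a sketch, again assuming a hypothetical example_dev): a handler that returns with a spinlock still held exits with preempt_count() higher than it entered.

	/* Hypothetical buggy timer handler. */
	static void broken_handler(unsigned long data)
	{
		struct example_dev *dev = (struct example_dev *)data;

		spin_lock(&dev->lock);
		dev->ticks++;
		/* Missing spin_unlock(&dev->lock): preempt_count() is now
		 * unbalanced, so __run_timers() prints the KERN_WARNING
		 * message above and hits BUG(). */
	}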
@@ -735,6 +752,15 @@ static void second_overflow(void)
     else
        time_adj += (time_adj >> 2) + (time_adj >> 5);
 #endif
+#if HZ == 250
+    /* Compensate for (HZ==250) != (1 << SHIFT_HZ).
+     * Add 1.5625% and 0.78125% to get 255.85938; => only 0.05% error (p. 14)
+     */
+    if (time_adj < 0)
+       time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
+    else
+       time_adj += (time_adj >> 6) + (time_adj >> 7);
+#endif
 #if HZ == 1000
     /* Compensate for (HZ==1000) != (1 << SHIFT_HZ).
      * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
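A quick userspace check of the new HZ == 250 shift arithmetic (my verification, not from the patch):

	#include <stdio.h>

	/* time_adj += (time_adj >> 6) + (time_adj >> 7) scales by
	 * 1 + 1/64 + 1/128 = 1.0234375.
	 */
	int main(void)
	{
		double scaled = 250.0 * (1.0 + 1.0 / 64 + 1.0 / 128);

		/* Prints 255.859375, about 0.055% below 256 == 1 << SHIFT_HZ,
		 * matching the 255.85938 / 0.05% figures in the comment. */
		printf("%f (%.3f%% low)\n", scaled,
		       (256.0 - scaled) * 100.0 / 256.0);
		return 0;
	}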
@@ -933,6 +959,7 @@ void do_timer(struct pt_regs *regs)
 {
        jiffies_64++;
        update_times();
+       softlockup_tick(regs);
 }
 
 #ifdef __ARCH_WANT_SYS_ALARM
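The softlockup_tick() call hooks the soft-lockup detector into every timer tick. As a usage aside (my assumption; the detector API is not shown in this diff), code that legitimately keeps a CPU busy for long stretches resets the detector explicitly:

	/* Hypothetical long-running polling loop. */
	while (example_device_busy(dev)) {
		cpu_relax();
		touch_softlockup_watchdog();	/* we are making progress */
	}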
@@ -1006,7 +1033,7 @@ asmlinkage long sys_getppid(void)
        parent = me->group_leader->real_parent;
        for (;;) {
                pid = parent->tgid;
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
 {
                struct task_struct *old = parent;
 
@@ -1133,9 +1160,26 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
  out:
        return timeout < 0 ? 0 : timeout;
 }
-
 EXPORT_SYMBOL(schedule_timeout);
 
+/*
+ * We can use __set_current_state() here because schedule_timeout() calls
+ * schedule() unconditionally.
+ */
+signed long __sched schedule_timeout_interruptible(signed long timeout)
+{
+       __set_current_state(TASK_INTERRUPTIBLE);
+       return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_interruptible);
+
+signed long __sched schedule_timeout_uninterruptible(signed long timeout)
+{
+       __set_current_state(TASK_UNINTERRUPTIBLE);
+       return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_uninterruptible);
+
 /* Thread ID - the internal kernel "pid" */
 asmlinkage long sys_gettid(void)
 {
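On the __set_current_state() comment above: the plain assignment suffices (no memory barrier) because schedule_timeout() always reaches schedule(), which serializes against wakeups. At call sites the helpers collapse the old two-step pattern into one call, e.g. (sketch; the real conversions follow in the nanosleep hunks):

	/* before */
	set_current_state(TASK_INTERRUPTIBLE);
	remaining = schedule_timeout(timeout);

	/* after */
	remaining = schedule_timeout_interruptible(timeout);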
@@ -1152,8 +1196,7 @@ static long __sched nanosleep_restart(struct restart_block *restart)
        if (!time_after(expire, now))
                return 0;
 
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire - now);
+       expire = schedule_timeout_interruptible(expire - now);
 
        ret = 0;
        if (expire) {
@@ -1181,8 +1224,7 @@ asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __us
                return -EINVAL;
 
        expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire);
+       expire = schedule_timeout_interruptible(expire);
 
        ret = 0;
        if (expire) {
@@ -1411,7 +1453,7 @@ static inline u64 time_interpolator_get_cycles(unsigned int src)
        }
 }
 
-static inline u64 time_interpolator_get_counter(void)
+static inline u64 time_interpolator_get_counter(int writelock)
 {
        unsigned int src = time_interpolator->source;
 
@@ -1425,6 +1467,15 @@ static inline u64 time_interpolator_get_counter(void)
                        now = time_interpolator_get_cycles(src);
                        if (lcycle && time_after(lcycle, now))
                                return lcycle;
+
+                       /* When holding the xtime write lock, there's no need
+                        * to add the overhead of the cmpxchg.  Readers are
+                        * forced to retry until the write lock is released.
+                        */
+                       if (writelock) {
+                               time_interpolator->last_cycle = now;
+                               return now;
+                       }
                        /* Keep track of the last timer value returned. The use of cmpxchg here
                         * will cause contention in an SMP environment.
                         */
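The new write-lock fast path is safe because the writer already holds the xtime write lock, so readers retry around any plain store to last_cycle. For contrast, a rough sketch of the lockless update the reader path keeps using (an approximation; this hunk elides the actual cmpxchg code):

	/* Concurrent readers may only advance last_cycle via cmpxchg;
	 * a plain store could move it backwards under a race. */
	unsigned long prev = time_interpolator->last_cycle;

	if (cmpxchg(&time_interpolator->last_cycle, prev, now) == prev)
		return now;	/* we advanced it */
	/* otherwise another CPU advanced it first; retry the loop */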
@@ -1438,7 +1489,7 @@ static inline u64 time_interpolator_get_counter(void)
 void time_interpolator_reset(void)
 {
        time_interpolator->offset = 0;
-       time_interpolator->last_counter = time_interpolator_get_counter();
+       time_interpolator->last_counter = time_interpolator_get_counter(1);
 }
 
 #define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
@@ -1450,7 +1501,7 @@ unsigned long time_interpolator_get_offset(void)
                return 0;
 
        return time_interpolator->offset +
-               GET_TI_NSECS(time_interpolator_get_counter(), time_interpolator);
+               GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
 }
 
 #define INTERPOLATOR_ADJUST 65536
@@ -1473,7 +1524,7 @@ static void time_interpolator_update(long delta_nsec)
         * and the tuning logic ensures that.
          */
 
-       counter = time_interpolator_get_counter();
+       counter = time_interpolator_get_counter(1);
        offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator);
 
        if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
@@ -1571,26 +1622,22 @@ void msleep(unsigned int msecs)
 {
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;
 
-       while (timeout) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               timeout = schedule_timeout(timeout);
-       }
+       while (timeout)
+               timeout = schedule_timeout_uninterruptible(timeout);
 }
 
 EXPORT_SYMBOL(msleep);
 
 /**
- * msleep_interruptible - sleep waiting for waitqueue interruptions
+ * msleep_interruptible - sleep waiting for signals
  * @msecs: Time in milliseconds to sleep for
  */
 unsigned long msleep_interruptible(unsigned int msecs)
 {
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;
 
-       while (timeout && !signal_pending(current)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               timeout = schedule_timeout(timeout);
-       }
+       while (timeout && !signal_pending(current))
+               timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
 }
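Finally, a hypothetical caller for the reworked msleep_interruptible(), which returns the unslept time in milliseconds:

	/* Sleep up to 500 ms, but give up early if a signal arrives. */
	unsigned long left = msleep_interruptible(500);

	if (left)
		printk(KERN_DEBUG "interrupted with %lu ms left\n", left);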