diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index b3f3edc..84af54c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -91,7 +91,7 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
-static inline void bump_cpu_timer(struct k_itimer *timer,
+static void bump_cpu_timer(struct k_itimer *timer,
                                  union cpu_time_count now)
 {
        int i;
@@ -110,7 +110,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
-                       if (delta <= incr)
+                       if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
@@ -128,7 +128,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                             incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
-                       if (cputime_le(delta, incr))
+                       if (cputime_lt(delta, incr))
                                continue;
                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
@@ -380,28 +380,31 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
 int posix_cpu_timer_del(struct k_itimer *timer)
 {
        struct task_struct *p = timer->it.cpu.task;
+       int ret = 0;
 
-       if (timer->it.cpu.firing)
-               return TIMER_RETRY;
-
-       if (unlikely(p == NULL))
-               return 0;
+       if (likely(p != NULL)) {
+               read_lock(&tasklist_lock);
+               if (unlikely(p->signal == NULL)) {
+                       /*
+                        * We raced with the reaping of the task.
+                        * The deletion should have cleared us off the list.
+                        */
+                       BUG_ON(!list_empty(&timer->it.cpu.entry));
+               } else {
+                       spin_lock(&p->sighand->siglock);
+                       if (timer->it.cpu.firing)
+                               ret = TIMER_RETRY;
+                       else
+                               list_del(&timer->it.cpu.entry);
+                       spin_unlock(&p->sighand->siglock);
+               }
+               read_unlock(&tasklist_lock);
 
-       spin_lock(&p->sighand->siglock);
-       if (!list_empty(&timer->it.cpu.entry)) {
-               /*
-                * Take us off the task's timer list.  We don't need to
-                * take tasklist_lock and check for the task being reaped.
-                * If it was reaped, it already called posix_cpu_timers_exit
-                * and posix_cpu_timers_exit_group to clear all the timers
-                * that pointed to it.
-                */
-               list_del(&timer->it.cpu.entry);
-               put_task_struct(p);
+               if (!ret)
+                       put_task_struct(p);
        }
-       spin_unlock(&p->sighand->siglock);
 
-       return 0;
+       return ret;
 }
 
 /*
@@ -418,8 +421,6 @@ static void cleanup_timers(struct list_head *head,
        cputime_t ptime = cputime_add(utime, stime);
 
        list_for_each_entry_safe(timer, next, head, entry) {
-               put_task_struct(timer->task);
-               timer->task = NULL;
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
@@ -431,8 +432,6 @@ static void cleanup_timers(struct list_head *head,
 
        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
-               put_task_struct(timer->task);
-               timer->task = NULL;
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
@@ -444,8 +443,6 @@ static void cleanup_timers(struct list_head *head,
 
        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
-               put_task_struct(timer->task);
-               timer->task = NULL;
                list_del_init(&timer->entry);
                if (timer->expires.sched < sched_time) {
                        timer->expires.sched = 0;
@@ -489,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p,
        struct task_struct *t = p;
        unsigned int nthreads = atomic_read(&p->signal->live);
 
+       if (!nthreads)
+               return;
+
        switch (clock_idx) {
        default:
                BUG();
@@ -497,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p,
                left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                   nthreads);
                do {
-                       if (!unlikely(t->exit_state)) {
+                       if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(prof_ticks(t), left);
                                if (cputime_eq(t->it_prof_expires,
                                               cputime_zero) ||
@@ -512,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p,
                left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                   nthreads);
                do {
-                       if (!unlikely(t->exit_state)) {
+                       if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(virt_ticks(t), left);
                                if (cputime_eq(t->it_virt_expires,
                                               cputime_zero) ||
@@ -527,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p,
                nsleft = expires.sched - val.sched;
                do_div(nsleft, nthreads);
                do {
-                       if (!unlikely(t->exit_state)) {
+                       if (likely(!(t->flags & PF_EXITING))) {
                                ns = t->sched_time + nsleft;
                                if (t->it_sched_expires == 0 ||
                                    t->it_sched_expires > ns) {
@@ -566,6 +566,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
        struct cpu_timer_list *next;
        unsigned long i;
 
+       if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
+               return;
+
        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);
@@ -576,17 +579,15 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
        listpos = head;
        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                list_for_each_entry(next, head, entry) {
-                       if (next->expires.sched > nt->expires.sched) {
-                               listpos = &next->entry;
+                       if (next->expires.sched > nt->expires.sched)
                                break;
-                       }
+                       listpos = &next->entry;
                }
        } else {
                list_for_each_entry(next, head, entry) {
-                       if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
-                               listpos = &next->entry;
+                       if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                break;
-                       }
+                       listpos = &next->entry;
                }
        }
        list_add(&nt->entry, listpos);
@@ -730,9 +731,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());
+
+       ret = 0;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
-       list_del_init(&timer->it.cpu.entry);
+       if (unlikely(timer->it.cpu.firing)) {
+               timer->it.cpu.firing = -1;
+               ret = TIMER_RETRY;
+       } else
+               list_del_init(&timer->it.cpu.entry);
        spin_unlock(&p->sighand->siglock);
 
        /*
@@ -780,7 +787,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                }
        }
 
-       if (unlikely(timer->it.cpu.firing)) {
+       if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
@@ -788,8 +795,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                read_unlock(&tasklist_lock);
-               timer->it.cpu.firing = -1;
-               ret = TIMER_RETRY;
                goto out;
        }
 
@@ -955,14 +960,16 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
 {
+       int maxfire;
        struct list_head *timers = tsk->cpu_timers;
 
+       maxfire = 20;
        tsk->it_prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
-               if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+               if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->it_prof_expires = t->expires.cpu;
                        break;
                }
@@ -971,12 +978,13 @@ static void check_thread_timers(struct task_struct *tsk,
        }
 
        ++timers;
+       maxfire = 20;
        tsk->it_virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
-               if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+               if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->it_virt_expires = t->expires.cpu;
                        break;
                }
@@ -985,12 +993,13 @@ static void check_thread_timers(struct task_struct *tsk,
        }
 
        ++timers;
+       maxfire = 20;
        tsk->it_sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
-               if (tsk->sched_time < t->expires.sched) {
+               if (!--maxfire || tsk->sched_time < t->expires.sched) {
                        tsk->it_sched_expires = t->expires.sched;
                        break;
                }
@@ -1007,6 +1016,7 @@ static void check_thread_timers(struct task_struct *tsk,
 static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
 {
+       int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, stime, ptime, virt_expires, prof_expires;
        unsigned long long sched_time, sched_expires;
@@ -1039,12 +1049,13 @@ static void check_process_timers(struct task_struct *tsk,
        } while (t != tsk);
        ptime = cputime_add(utime, stime);
 
+       maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
-               if (cputime_lt(ptime, t->expires.cpu)) {
+               if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
                        prof_expires = t->expires.cpu;
                        break;
                }
@@ -1053,12 +1064,13 @@ static void check_process_timers(struct task_struct *tsk,
        }
 
        ++timers;
+       maxfire = 20;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
-               if (cputime_lt(utime, t->expires.cpu)) {
+               if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
                        virt_expires = t->expires.cpu;
                        break;
                }
@@ -1067,12 +1079,13 @@ static void check_process_timers(struct task_struct *tsk,
        }
 
        ++timers;
+       maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
-               if (sched_time < t->expires.sched) {
+               if (!--maxfire || sched_time < t->expires.sched) {
                        sched_expires = t->expires.sched;
                        break;
                }
@@ -1155,6 +1168,9 @@ static void check_process_timers(struct task_struct *tsk,
                unsigned long long sched_left, sched;
                const unsigned int nthreads = atomic_read(&sig->live);
 
+               if (!nthreads)
+                       return;
+
                prof_left = cputime_sub(prof_expires, utime);
                prof_left = cputime_sub(prof_left, stime);
                prof_left = cputime_div(prof_left, nthreads);
@@ -1191,7 +1207,7 @@ static void check_process_timers(struct task_struct *tsk,
 
                        do {
                                t = next_thread(t);
-                       } while (unlikely(t->exit_state));
+                       } while (unlikely(t->flags & PF_EXITING));
                } while (t != tsk);
        }
 }
@@ -1209,7 +1225,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                /*
                 * The task was cleaned up already, no future firings.
                 */
-               return;
+               goto out;
 
        /*
         * Fetch the current sample and update the timer's expiry time.
@@ -1219,7 +1235,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
-                       return;
+                       goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it.  */
        } else {
@@ -1232,8 +1248,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
-                       read_unlock(&tasklist_lock);
-                       return;
+                       goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
@@ -1241,8 +1256,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
-                       read_unlock(&tasklist_lock);
-                       return;
+                       goto out_unlock;
                }
                cpu_clock_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
@@ -1254,7 +1268,13 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
         */
        arm_timer(timer, now);
 
+out_unlock:
        read_unlock(&tasklist_lock);
+
+out:
+       timer->it_overrun_last = timer->it_overrun;
+       timer->it_overrun = -1;
+       ++timer->it_requeue_pending;
 }
 
 /*
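
A note on the bump_cpu_timer() hunks above: the one-character change from "delta <= incr" to "delta < incr" affects how far the expiry is advanced when the current clock sample lands exactly on a period boundary. The sketch below is not part of the patch; it is a minimal userspace model of the same doubling/halving overrun arithmetic, with plain uint64_t counts standing in for union cpu_time_count and the cputime_t helpers, so the before/after behaviour can be checked with ordinary arithmetic.

/*
 * Illustrative only: standalone model of bump_cpu_timer()'s
 * sched-clock branch, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

static int bump(uint64_t *expires, uint64_t incr, uint64_t now)
{
	uint64_t delta;
	int overrun = 0;
	int i;

	if (incr == 0 || now < *expires)
		return 0;
	delta = now + incr - *expires;

	/* Double incr until it covers more than half of delta
	 * (mirrors "for (i = 0; incr < delta - incr; i++)"). */
	for (i = 0; incr < delta - incr; i++)
		incr <<= 1;
	/* Walk back down; with the patched "delta < incr" test the
	 * expiry always ends up strictly beyond `now`. */
	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;
		*expires += incr;
		overrun += 1 << i;
		delta -= incr;
	}
	return overrun;
}

int main(void)
{
	uint64_t expires = 100, period = 30;

	/* 250 sits exactly on a period boundary (100 + 5*30). */
	int n = bump(&expires, period, 250);
	printf("overrun=%d, new expires=%llu\n",
	       n, (unsigned long long)expires);
	/* Prints overrun=6, new expires=280: the expiry is pushed past
	 * the sample.  With the pre-patch "delta <= incr" test the same
	 * inputs leave expires at 250, i.e. equal to the sample. */
	return 0;
}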