string: factorize skip_spaces and export it to be generally available
[safe/jmp/linux-2.6] / kernel / posix-cpu-timers.c
index ae5c6c1..438ff45 100644 (file)
@@ -4,8 +4,27 @@
 
 #include <linux/sched.h>
 #include <linux/posix-timers.h>
-#include <asm/uaccess.h>
 #include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>
+#include <linux/kernel_stat.h>
+#include <trace/events/timer.h>
+
+/*
+ * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ */
+void update_rlimit_cpu(unsigned long rlim_new)
+{
+       cputime_t cputime = secs_to_cputime(rlim_new);
+       struct signal_struct *const sig = current->signal;
+
+       if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
+           cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
+               spin_lock_irq(&current->sighand->siglock);
+               set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+               spin_unlock_irq(&current->sighand->siglock);
+       }
+}
 
 static int check_clock(const clockid_t which_clock)
 {
@@ -47,12 +66,10 @@ static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
 {
-       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-               tp->tv_sec = div_long_long_rem(cpu.sched,
-                                              NSEC_PER_SEC, &tp->tv_nsec);
-       } else {
+       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+               *tp = ns_to_timespec(cpu.sched);
+       else
                cputime_to_timespec(cpu.cpu, tp);
-       }
 }
 
 static inline int cpu_time_before(const clockid_t which_clock,
@@ -159,10 +176,6 @@ static inline cputime_t virt_ticks(struct task_struct *p)
 {
        return p->utime;
 }
-static inline unsigned long long sched_ns(struct task_struct *p)
-{
-       return task_sched_runtime(p);
-}
 
 int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
 {
@@ -212,68 +225,105 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
-               cpu->sched = sched_ns(p);
+               cpu->sched = task_sched_runtime(p);
                break;
        }
        return 0;
 }
 
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+       struct sighand_struct *sighand;
+       struct signal_struct *sig;
+       struct task_struct *t;
+
+       *times = INIT_CPUTIME;
+
+       rcu_read_lock();
+       sighand = rcu_dereference(tsk->sighand);
+       if (!sighand)
+               goto out;
+
+       sig = tsk->signal;
+
+       t = tsk;
+       do {
+               times->utime = cputime_add(times->utime, t->utime);
+               times->stime = cputime_add(times->stime, t->stime);
+               times->sum_exec_runtime += t->se.sum_exec_runtime;
+
+               t = next_thread(t);
+       } while (t != tsk);
+
+       times->utime = cputime_add(times->utime, sig->utime);
+       times->stime = cputime_add(times->stime, sig->stime);
+       times->sum_exec_runtime += sig->sum_sched_runtime;
+out:
+       rcu_read_unlock();
+}
+
+static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+{
+       if (cputime_gt(b->utime, a->utime))
+               a->utime = b->utime;
+
+       if (cputime_gt(b->stime, a->stime))
+               a->stime = b->stime;
+
+       if (b->sum_exec_runtime > a->sum_exec_runtime)
+               a->sum_exec_runtime = b->sum_exec_runtime;
+}
+
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+{
+       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       struct task_cputime sum;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cputimer->lock, flags);
+       if (!cputimer->running) {
+               cputimer->running = 1;
+               /*
+                * The POSIX timer interface allows for absolute time expiry
+                * values through the TIMER_ABSTIME flag, therefore we have
+                * to synchronize the timer to the clock every time we start
+                * it.
+                */
+               thread_group_cputime(tsk, &sum);
+               update_gt_cputime(&cputimer->cputime, &sum);
+       }
+       *times = cputimer->cputime;
+       spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Sample a process (thread group) clock for the given group_leader task.
  * Must be called with tasklist_lock held for reading.
- * Must be called with tasklist_lock held for reading, and p->sighand->siglock.
  */
-static int cpu_clock_sample_group_locked(unsigned int clock_idx,
-                                        struct task_struct *p,
-                                        union cpu_time_count *cpu)
+static int cpu_clock_sample_group(const clockid_t which_clock,
+                                 struct task_struct *p,
+                                 union cpu_time_count *cpu)
 {
-       struct task_struct *t = p;
-       switch (clock_idx) {
+       struct task_cputime cputime;
+
+       switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
-               cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
-               do {
-                       cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
-                       t = next_thread(t);
-               } while (t != p);
+               thread_group_cputime(p, &cputime);
+               cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
-               cpu->cpu = p->signal->utime;
-               do {
-                       cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
-                       t = next_thread(t);
-               } while (t != p);
+               thread_group_cputime(p, &cputime);
+               cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
-               cpu->sched = p->signal->sum_sched_runtime;
-               /* Add in each other live thread.  */
-               while ((t = next_thread(t)) != p) {
-                       cpu->sched += t->se.sum_exec_runtime;
-               }
-               cpu->sched += sched_ns(p);
+               cpu->sched = thread_group_sched_runtime(p);
                break;
        }
        return 0;
 }
 
-/*
- * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
- */
-static int cpu_clock_sample_group(const clockid_t which_clock,
-                                 struct task_struct *p,
-                                 union cpu_time_count *cpu)
-{
-       int ret;
-       unsigned long flags;
-       spin_lock_irqsave(&p->sighand->siglock, flags);
-       ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
-                                           cpu);
-       spin_unlock_irqrestore(&p->sighand->siglock, flags);
-       return ret;
-}
-
 
 int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 {
@@ -334,7 +384,8 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 
 /*
  * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
- * This is called from sys_timer_create with the new timer already locked.
+ * This is called from sys_timer_create() and do_cpu_nanosleep() with the
+ * new timer already all-zeros initialized.
  */
 int posix_cpu_timer_create(struct k_itimer *new_timer)
 {
@@ -346,8 +397,6 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
                return -EINVAL;
 
        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
-       new_timer->it.cpu.incr.sched = 0;
-       new_timer->it.cpu.expires.sched = 0;
 
        read_lock(&tasklist_lock);
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
@@ -472,80 +521,12 @@ void posix_cpu_timers_exit(struct task_struct *tsk)
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-       cleanup_timers(tsk->signal->cpu_timers,
-                      cputime_add(tsk->utime, tsk->signal->utime),
-                      cputime_add(tsk->stime, tsk->signal->stime),
-                    tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
-}
-
-
-/*
- * Set the expiry times of all the threads in the process so one of them
- * will go off before the process cumulative expiry total is reached.
- */
-static void process_timer_rebalance(struct task_struct *p,
-                                   unsigned int clock_idx,
-                                   union cpu_time_count expires,
-                                   union cpu_time_count val)
-{
-       cputime_t ticks, left;
-       unsigned long long ns, nsleft;
-       struct task_struct *t = p;
-       unsigned int nthreads = atomic_read(&p->signal->live);
-
-       if (!nthreads)
-               return;
+       struct signal_struct *const sig = tsk->signal;
 
-       switch (clock_idx) {
-       default:
-               BUG();
-               break;
-       case CPUCLOCK_PROF:
-               left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
-                                      nthreads);
-               do {
-                       if (likely(!(t->flags & PF_EXITING))) {
-                               ticks = cputime_add(prof_ticks(t), left);
-                               if (cputime_eq(t->it_prof_expires,
-                                              cputime_zero) ||
-                                   cputime_gt(t->it_prof_expires, ticks)) {
-                                       t->it_prof_expires = ticks;
-                               }
-                       }
-                       t = next_thread(t);
-               } while (t != p);
-               break;
-       case CPUCLOCK_VIRT:
-               left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
-                                      nthreads);
-               do {
-                       if (likely(!(t->flags & PF_EXITING))) {
-                               ticks = cputime_add(virt_ticks(t), left);
-                               if (cputime_eq(t->it_virt_expires,
-                                              cputime_zero) ||
-                                   cputime_gt(t->it_virt_expires, ticks)) {
-                                       t->it_virt_expires = ticks;
-                               }
-                       }
-                       t = next_thread(t);
-               } while (t != p);
-               break;
-       case CPUCLOCK_SCHED:
-               nsleft = expires.sched - val.sched;
-               do_div(nsleft, nthreads);
-               nsleft = max_t(unsigned long long, nsleft, 1);
-               do {
-                       if (likely(!(t->flags & PF_EXITING))) {
-                               ns = t->se.sum_exec_runtime + nsleft;
-                               if (t->it_sched_expires == 0 ||
-                                   t->it_sched_expires > ns) {
-                                       t->it_sched_expires = ns;
-                               }
-                       }
-                       t = next_thread(t);
-               } while (t != p);
-               break;
-       }
+       cleanup_timers(sig->cpu_timers,
+                      cputime_add(tsk->utime, sig->utime),
+                      cputime_add(tsk->stime, sig->stime),
+                      tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
 static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
@@ -561,6 +542,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
                                             now);
 }
 
+static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+{
+       return cputime_eq(expires, cputime_zero) ||
+              cputime_gt(expires, new_exp);
+}
+
+static inline int expires_le(cputime_t expires, cputime_t new_exp)
+{
+       return !cputime_eq(expires, cputime_zero) &&
+              cputime_le(expires, new_exp);
+}
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
@@ -605,61 +597,56 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
                 */
 
                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+                       union cpu_time_count *exp = &nt->expires;
+
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
-                               if (cputime_eq(p->it_prof_expires,
-                                              cputime_zero) ||
-                                   cputime_gt(p->it_prof_expires,
-                                              nt->expires.cpu))
-                                       p->it_prof_expires = nt->expires.cpu;
+                               if (expires_gt(p->cputime_expires.prof_exp,
+                                              exp->cpu))
+                                       p->cputime_expires.prof_exp = exp->cpu;
                                break;
                        case CPUCLOCK_VIRT:
-                               if (cputime_eq(p->it_virt_expires,
-                                              cputime_zero) ||
-                                   cputime_gt(p->it_virt_expires,
-                                              nt->expires.cpu))
-                                       p->it_virt_expires = nt->expires.cpu;
+                               if (expires_gt(p->cputime_expires.virt_exp,
+                                              exp->cpu))
+                                       p->cputime_expires.virt_exp = exp->cpu;
                                break;
                        case CPUCLOCK_SCHED:
-                               if (p->it_sched_expires == 0 ||
-                                   p->it_sched_expires > nt->expires.sched)
-                                       p->it_sched_expires = nt->expires.sched;
+                               if (p->cputime_expires.sched_exp == 0 ||
+                                   p->cputime_expires.sched_exp > exp->sched)
+                                       p->cputime_expires.sched_exp =
+                                                               exp->sched;
                                break;
                        }
                } else {
+                       struct signal_struct *const sig = p->signal;
+                       union cpu_time_count *exp = &timer->it.cpu.expires;
+
                        /*
-                        * For a process timer, we must balance
-                        * all the live threads' expirations.
+                        * For a process timer, set the cached expiration time.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_VIRT:
-                               if (!cputime_eq(p->signal->it_virt_expires,
-                                               cputime_zero) &&
-                                   cputime_lt(p->signal->it_virt_expires,
-                                              timer->it.cpu.expires.cpu))
+                               if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
+                                              exp->cpu))
                                        break;
-                               goto rebalance;
+                               sig->cputime_expires.virt_exp = exp->cpu;
+                               break;
                        case CPUCLOCK_PROF:
-                               if (!cputime_eq(p->signal->it_prof_expires,
-                                               cputime_zero) &&
-                                   cputime_lt(p->signal->it_prof_expires,
-                                              timer->it.cpu.expires.cpu))
+                               if (expires_le(sig->it[CPUCLOCK_PROF].expires,
+                                              exp->cpu))
                                        break;
-                               i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
+                               i = sig->rlim[RLIMIT_CPU].rlim_cur;
                                if (i != RLIM_INFINITY &&
-                                   i <= cputime_to_secs(timer->it.cpu.expires.cpu))
+                                   i <= cputime_to_secs(exp->cpu))
                                        break;
-                               goto rebalance;
+                               sig->cputime_expires.prof_exp = exp->cpu;
+                               break;
                        case CPUCLOCK_SCHED:
-                       rebalance:
-                               process_timer_rebalance(
-                                       timer->it.cpu.task,
-                                       CPUCLOCK_WHICH(timer->it_clock),
-                                       timer->it.cpu.expires, now);
+                               sig->cputime_expires.sched_exp = exp->sched;
                                break;
                        }
                }
@@ -698,6 +685,33 @@ static void cpu_timer_fire(struct k_itimer *timer)
 }
 
 /*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+                                 struct task_struct *p,
+                                 union cpu_time_count *cpu)
+{
+       struct task_cputime cputime;
+
+       thread_group_cputimer(p, &cputime);
+       switch (CPUCLOCK_WHICH(which_clock)) {
+       default:
+               return -EINVAL;
+       case CPUCLOCK_PROF:
+               cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+               break;
+       case CPUCLOCK_VIRT:
+               cpu->cpu = cputime.utime;
+               break;
+       case CPUCLOCK_SCHED:
+               cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+               break;
+       }
+       return 0;
+}
+
+/*
  * Guts of sys_timer_settime for CPU timers.
  * This is called with the timer locked and interrupts disabled.
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
@@ -758,7 +772,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
-               cpu_clock_sample_group(timer->it_clock, p, &val);
+               cpu_timer_sample_group(timer->it_clock, p, &val);
        }
 
        if (old) {
@@ -906,7 +920,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
-                       cpu_clock_sample_group(timer->it_clock, p, &now);
+                       cpu_timer_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
@@ -970,13 +984,13 @@ static void check_thread_timers(struct task_struct *tsk,
        struct signal_struct *const sig = tsk->signal;
 
        maxfire = 20;
-       tsk->it_prof_expires = cputime_zero;
+       tsk->cputime_expires.prof_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
-                       tsk->it_prof_expires = t->expires.cpu;
+                       tsk->cputime_expires.prof_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
@@ -985,13 +999,13 @@ static void check_thread_timers(struct task_struct *tsk,
 
        ++timers;
        maxfire = 20;
-       tsk->it_virt_expires = cputime_zero;
+       tsk->cputime_expires.virt_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
-                       tsk->it_virt_expires = t->expires.cpu;
+                       tsk->cputime_expires.virt_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
@@ -1000,13 +1014,13 @@ static void check_thread_timers(struct task_struct *tsk,
 
        ++timers;
        maxfire = 20;
-       tsk->it_sched_expires = 0;
+       tsk->cputime_expires.sched_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
-                       tsk->it_sched_expires = t->expires.sched;
+                       tsk->cputime_expires.sched_exp = t->expires.sched;
                        break;
                }
                t->firing = 1;
@@ -1038,11 +1052,61 @@ static void check_thread_timers(struct task_struct *tsk,
                                sig->rlim[RLIMIT_RTTIME].rlim_cur +=
                                                                USEC_PER_SEC;
                        }
+                       printk(KERN_INFO
+                               "RT Watchdog Timeout: %s[%d]\n",
+                               tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
 }
 
+static void stop_process_timers(struct task_struct *tsk)
+{
+       struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+       unsigned long flags;
+
+       if (!cputimer->running)
+               return;
+
+       spin_lock_irqsave(&cputimer->lock, flags);
+       cputimer->running = 0;
+       spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
+static u32 onecputick;
+
+static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
+                            cputime_t *expires, cputime_t cur_time, int signo)
+{
+       if (cputime_eq(it->expires, cputime_zero))
+               return;
+
+       if (cputime_ge(cur_time, it->expires)) {
+               if (!cputime_eq(it->incr, cputime_zero)) {
+                       it->expires = cputime_add(it->expires, it->incr);
+                       it->error += it->incr_error;
+                       if (it->error >= onecputick) {
+                               it->expires = cputime_sub(it->expires,
+                                                         cputime_one_jiffy);
+                               it->error -= onecputick;
+                       }
+               } else {
+                       it->expires = cputime_zero;
+               }
+
+               trace_itimer_expire(signo == SIGPROF ?
+                                   ITIMER_PROF : ITIMER_VIRTUAL,
+                                   tsk->signal->leader_pid, cur_time);
+               __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+       }
+
+       if (!cputime_eq(it->expires, cputime_zero) &&
+           (cputime_eq(*expires, cputime_zero) ||
+            cputime_lt(it->expires, *expires))) {
+               *expires = it->expires;
+       }
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list.  Per-thread timers
@@ -1053,37 +1117,31 @@ static void check_process_timers(struct task_struct *tsk,
 {
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
-       cputime_t utime, stime, ptime, virt_expires, prof_expires;
+       cputime_t utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
-       struct task_struct *t;
        struct list_head *timers = sig->cpu_timers;
+       struct task_cputime cputime;
 
        /*
         * Don't sample the current process CPU clocks if there are no timers.
         */
        if (list_empty(&timers[CPUCLOCK_PROF]) &&
-           cputime_eq(sig->it_prof_expires, cputime_zero) &&
+           cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
-           cputime_eq(sig->it_virt_expires, cputime_zero) &&
-           list_empty(&timers[CPUCLOCK_SCHED]))
+           cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
+           list_empty(&timers[CPUCLOCK_SCHED])) {
+               stop_process_timers(tsk);
                return;
+       }
 
        /*
         * Collect the current process totals.
         */
-       utime = sig->utime;
-       stime = sig->stime;
-       sum_sched_runtime = sig->sum_sched_runtime;
-       t = tsk;
-       do {
-               utime = cputime_add(utime, t->utime);
-               stime = cputime_add(stime, t->stime);
-               sum_sched_runtime += t->se.sum_exec_runtime;
-               t = next_thread(t);
-       } while (t != tsk);
-       ptime = cputime_add(utime, stime);
-
+       thread_group_cputimer(tsk, &cputime);
+       utime = cputime.utime;
+       ptime = cputime_add(utime, cputime.stime);
+       sum_sched_runtime = cputime.sum_exec_runtime;
        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
@@ -1131,38 +1189,11 @@ static void check_process_timers(struct task_struct *tsk,
        /*
         * Check for the special case process timers.
         */
-       if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-               if (cputime_ge(ptime, sig->it_prof_expires)) {
-                       /* ITIMER_PROF fires and reloads.  */
-                       sig->it_prof_expires = sig->it_prof_incr;
-                       if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-                               sig->it_prof_expires = cputime_add(
-                                       sig->it_prof_expires, ptime);
-                       }
-                       __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
-               }
-               if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
-                   (cputime_eq(prof_expires, cputime_zero) ||
-                    cputime_lt(sig->it_prof_expires, prof_expires))) {
-                       prof_expires = sig->it_prof_expires;
-               }
-       }
-       if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-               if (cputime_ge(utime, sig->it_virt_expires)) {
-                       /* ITIMER_VIRTUAL fires and reloads.  */
-                       sig->it_virt_expires = sig->it_virt_incr;
-                       if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-                               sig->it_virt_expires = cputime_add(
-                                       sig->it_virt_expires, utime);
-                       }
-                       __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
-               }
-               if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
-                   (cputime_eq(virt_expires, cputime_zero) ||
-                    cputime_lt(sig->it_virt_expires, virt_expires))) {
-                       virt_expires = sig->it_virt_expires;
-               }
-       }
+       check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
+                        SIGPROF);
+       check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
+                        SIGVTALRM);
+
        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                cputime_t x;
@@ -1191,60 +1222,18 @@ static void check_process_timers(struct task_struct *tsk,
                }
        }
 
-       if (!cputime_eq(prof_expires, cputime_zero) ||
-           !cputime_eq(virt_expires, cputime_zero) ||
-           sched_expires != 0) {
-               /*
-                * Rebalance the threads' expiry times for the remaining
-                * process CPU timers.
-                */
-
-               cputime_t prof_left, virt_left, ticks;
-               unsigned long long sched_left, sched;
-               const unsigned int nthreads = atomic_read(&sig->live);
-
-               if (!nthreads)
-                       return;
-
-               prof_left = cputime_sub(prof_expires, utime);
-               prof_left = cputime_sub(prof_left, stime);
-               prof_left = cputime_div_non_zero(prof_left, nthreads);
-               virt_left = cputime_sub(virt_expires, utime);
-               virt_left = cputime_div_non_zero(virt_left, nthreads);
-               if (sched_expires) {
-                       sched_left = sched_expires - sum_sched_runtime;
-                       do_div(sched_left, nthreads);
-                       sched_left = max_t(unsigned long long, sched_left, 1);
-               } else {
-                       sched_left = 0;
-               }
-               t = tsk;
-               do {
-                       if (unlikely(t->flags & PF_EXITING))
-                               continue;
-
-                       ticks = cputime_add(cputime_add(t->utime, t->stime),
-                                           prof_left);
-                       if (!cputime_eq(prof_expires, cputime_zero) &&
-                           (cputime_eq(t->it_prof_expires, cputime_zero) ||
-                            cputime_gt(t->it_prof_expires, ticks))) {
-                               t->it_prof_expires = ticks;
-                       }
-
-                       ticks = cputime_add(t->utime, virt_left);
-                       if (!cputime_eq(virt_expires, cputime_zero) &&
-                           (cputime_eq(t->it_virt_expires, cputime_zero) ||
-                            cputime_gt(t->it_virt_expires, ticks))) {
-                               t->it_virt_expires = ticks;
-                       }
-
-                       sched = t->se.sum_exec_runtime + sched_left;
-                       if (sched_expires && (t->it_sched_expires == 0 ||
-                                             t->it_sched_expires > sched)) {
-                               t->it_sched_expires = sched;
-                       }
-               } while ((t = next_thread(t)) != tsk);
-       }
+       if (!cputime_eq(prof_expires, cputime_zero) &&
+           (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
+            cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
+               sig->cputime_expires.prof_exp = prof_expires;
+       if (!cputime_eq(virt_expires, cputime_zero) &&
+           (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
+            cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
+               sig->cputime_expires.virt_exp = virt_expires;
+       if (sched_expires != 0 &&
+           (sig->cputime_expires.sched_exp == 0 ||
+            sig->cputime_expires.sched_exp > sched_expires))
+               sig->cputime_expires.sched_exp = sched_expires;
 }
 
 /*
@@ -1293,7 +1282,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
-               cpu_clock_sample_group(timer->it_clock, p, &now);
+               cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below.  */
        }
@@ -1312,6 +1301,90 @@ out:
        ++timer->it_requeue_pending;
 }
 
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime:   The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero.  Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+       /*
+        * utime/stime are cputime_t values and must go through the
+        * cputime_eq() helper; sum_exec_runtime is a plain integer
+        * (scheduler runtime) and can be tested against 0 directly.
+        */
+       if (cputime_eq(cputime->utime, cputime_zero) &&
+           cputime_eq(cputime->stime, cputime_zero) &&
+           cputime->sum_exec_runtime == 0)
+               return 1;
+       return 0;
+}
+
+/**
+ * task_cputime_expired - Compare two task_cputime entities.
+ *
+ * @sample:    The task_cputime structure to be checked for expiration.
+ * @expires:   Expiration times, against which @sample will be checked.
+ *
+ * Checks @sample against @expires to see if any field of @sample has expired.
+ * Returns true if any field of the former is greater than the corresponding
+ * field of the latter if the latter field is set.  Otherwise returns false.
+ */
+static inline int task_cputime_expired(const struct task_cputime *sample,
+                                       const struct task_cputime *expires)
+{
+       /* A zero expiry field means no timer is armed for that clock. */
+       if (!cputime_eq(expires->utime, cputime_zero) &&
+           cputime_ge(sample->utime, expires->utime))
+               return 1;
+       /*
+        * NOTE(review): the stime expiry is checked against utime+stime,
+        * i.e. this field appears to carry the PROF (total CPU time)
+        * expiration rather than system time alone -- confirm against the
+        * cputime_expires field definitions.
+        */
+       if (!cputime_eq(expires->stime, cputime_zero) &&
+           cputime_ge(cputime_add(sample->utime, sample->stime),
+                      expires->stime))
+               return 1;
+       if (expires->sum_exec_runtime != 0 &&
+           sample->sum_exec_runtime >= expires->sum_exec_runtime)
+               return 1;
+       return 0;
+}
+
+/**
+ * fastpath_timer_check - POSIX CPU timers fast path.
+ *
+ * @tsk:       The task (thread) being checked.
+ *
+ * Check the task and thread group timers.  If both are zero (there are no
+ * timers set) return false.  Otherwise snapshot the task and thread group
+ * timers and compare them with the corresponding expiration times.  Return
+ * true if a timer has expired, else return false.
+ */
+static inline int fastpath_timer_check(struct task_struct *tsk)
+{
+       struct signal_struct *sig;
+
+       /* tsk == current, ensure it is safe to use ->signal/sighand */
+       if (unlikely(tsk->exit_state))
+               return 0;
+
+       /* Per-thread timers: snapshot only this thread's own accounting. */
+       if (!task_cputime_zero(&tsk->cputime_expires)) {
+               struct task_cputime task_sample = {
+                       .utime = tsk->utime,
+                       .stime = tsk->stime,
+                       .sum_exec_runtime = tsk->se.sum_exec_runtime
+               };
+
+               if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+                       return 1;
+       }
+
+       /* Process-wide timers: sample the summed thread-group times. */
+       sig = tsk->signal;
+       if (!task_cputime_zero(&sig->cputime_expires)) {
+               struct task_cputime group_sample;
+
+               thread_group_cputimer(tsk, &group_sample);
+               if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+                       return 1;
+       }
+
+       /*
+        * A finite RLIMIT_CPU makes this return true, forcing the caller
+        * into the slow path so the rlimit is re-checked under siglock.
+        */
+       return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
+}
+
 /*
  * This is called from the timer interrupt handler.  The irq handler has
  * already updated our counts.  We need to check if any timers fire now.
@@ -1324,42 +1397,31 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
        BUG_ON(!irqs_disabled());
 
-#define UNEXPIRED(clock) \
-               (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
-                cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))
-
-       if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
-           (tsk->it_sched_expires == 0 ||
-            tsk->se.sum_exec_runtime < tsk->it_sched_expires))
+       /*
+        * The fast path checks that there are no expired thread or thread
+        * group timers.  If that's so, just return.
+        */
+       if (!fastpath_timer_check(tsk))
                return;
 
-#undef UNEXPIRED
-
+       spin_lock(&tsk->sighand->siglock);
        /*
-        * Double-check with locks held.
+        * Here we take off tsk->signal->cpu_timers[N] and
+        * tsk->cpu_timers[N] all the timers that are firing, and
+        * put them on the firing list.
         */
-       read_lock(&tasklist_lock);
-       if (likely(tsk->signal != NULL)) {
-               spin_lock(&tsk->sighand->siglock);
+       check_thread_timers(tsk, &firing);
+       check_process_timers(tsk, &firing);
 
-               /*
-                * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
-                * all the timers that are firing, and put them on the firing list.
-                */
-               check_thread_timers(tsk, &firing);
-               check_process_timers(tsk, &firing);
-
-               /*
-                * We must release these locks before taking any timer's lock.
-                * There is a potential race with timer deletion here, as the
-                * siglock now protects our private firing list.  We have set
-                * the firing flag in each timer, so that a deletion attempt
-                * that gets the timer lock before we do will give it up and
-                * spin until we've taken care of that timer below.
-                */
-               spin_unlock(&tsk->sighand->siglock);
-       }
-       read_unlock(&tasklist_lock);
+       /*
+        * We must release these locks before taking any timer's lock.
+        * There is a potential race with timer deletion here, as the
+        * siglock now protects our private firing list.  We have set
+        * the firing flag in each timer, so that a deletion attempt
+        * that gets the timer lock before we do will give it up and
+        * spin until we've taken care of that timer below.
+        */
+       spin_unlock(&tsk->sighand->siglock);
 
        /*
         * Now that all the timers on our list have the firing flag,
@@ -1368,29 +1430,28 @@ void run_posix_cpu_timers(struct task_struct *tsk)
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-               int firing;
+               int cpu_firing;
+
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
-               firing = timer->it.cpu.firing;
+               cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
-               if (likely(firing >= 0)) {
+               if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
-               }
                spin_unlock(&timer->it_lock);
        }
 }
 
 /*
  * Set one of the process-wide special case CPU timers.
- * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
- * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
- * absolute; non-null for ITIMER_*, where *newval is relative and we update
- * it to be absolute, *oldval is absolute and we update it to be relative.
+ * The tsk->sighand->siglock must be held by the caller.
+ * The *newval argument is relative and we update it to be absolute, *oldval
+ * is absolute and we update it to be relative.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
@@ -1399,13 +1460,13 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
        struct list_head *head;
 
        BUG_ON(clock_idx == CPUCLOCK_SCHED);
-       cpu_clock_sample_group_locked(clock_idx, tsk, &now);
+       cpu_timer_sample_group(clock_idx, tsk, &now);
 
        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
-                               *oldval = jiffies_to_cputime(1);
+                               *oldval = cputime_one_jiffy;
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
@@ -1433,13 +1494,14 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
            cputime_ge(list_first_entry(head,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
-               /*
-                * Rejigger each thread's expiry time so that one will
-                * notice before we hit the process-cumulative expiry time.
-                */
-               union cpu_time_count expires = { .sched = 0 };
-               expires.cpu = *newval;
-               process_timer_rebalance(tsk, clock_idx, expires, now);
+               switch (clock_idx) {
+               case CPUCLOCK_PROF:
+                       tsk->signal->cputime_expires.prof_exp = *newval;
+                       break;
+               case CPUCLOCK_VIRT:
+                       tsk->signal->cputime_expires.virt_exp = *newval;
+                       break;
+               }
        }
 }
 
@@ -1650,10 +1712,15 @@ static __init int init_posix_cpu_timers(void)
                .nsleep = thread_cpu_nsleep,
                .nsleep_restart = thread_cpu_nsleep_restart,
        };
+       struct timespec ts;
 
        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
 
+       cputime_to_timespec(cputime_one_jiffy, &ts);
+       onecputick = ts.tv_nsec;
+       WARN_ON(ts.tv_sec != 0);
+
        return 0;
 }
 __initcall(init_posix_cpu_timers);