/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>

/*
 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
 */
void update_rlimit_cpu(unsigned long rlim_new)
{
        cputime_t cputime = secs_to_cputime(rlim_new);
        struct signal_struct *const sig = current->signal;

        if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
            cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
                spin_lock_irq(&current->sighand->siglock);
                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
                spin_unlock_irq(&current->sighand->siglock);
        }
}

static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        read_lock(&tasklist_lock);
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                   same_thread_group(p, current) : thread_group_leader(p))) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return error;
}

static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                *tp = ns_to_timespec(cpu.sched);
        else
                cputime_to_timespec(cpu.cpu, tp);
}

static inline int cpu_time_before(const clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return cputime_lt(now.cpu, then.cpu);
        }
}

static inline void cpu_time_add(const clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu = cputime_add(acc->cpu, val.cpu);
        }
}

static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu = cputime_sub(a.cpu, b.cpu);
        }
        return a;
}

/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result
 * of the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
        cputime_t res = cputime_div(time, div);

        return max_t(cputime_t, res, 1);
}

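/*
 * Illustrative arithmetic (added example, not from the original source):
 * dividing a 3-jiffy amount by 4 with plain cputime_div() truncates to 0,
 * and a timer re-armed with a 0 increment would be due again on the very
 * next check, so its signal could never catch up.  Clamping to 1:
 *
 *      cputime_div_non_zero(3, 4) == 1         (cputime_div(3, 4) == 0)
 *
 * guarantees every rearm moves the expiry forward by at least one tick.
 */
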
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                           union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;

                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = cputime_sub(cputime_add(now.cpu, incr),
                                    timer->it.cpu.expires.cpu);
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                        incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
                        if (cputime_lt(delta, incr))
                                continue;

                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
                        timer->it_overrun += 1 << i;
                        delta = cputime_sub(delta, incr);
                }
        }
}

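/*
 * Worked example of the loops above (added illustration): with
 * expires = 10, incr = 3 and a sample now = 25, delta = 25 + 3 - 10 = 18.
 * The first loop doubles incr to 6 and then 12 (i == 2).  The second loop
 * then peels binary multiples of the period off delta:
 *
 *      i = 2, incr = 12: expires = 22, it_overrun += 4, delta = 6
 *      i = 1, incr = 6:  expires = 28, it_overrun += 2, delta = 0
 *      i = 0, incr = 3:  delta < incr, nothing to add
 *
 * ending with expires = 28, the first period boundary after now, and all
 * six overruns counted in a number of steps logarithmic in delta/incr.
 */
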
static inline cputime_t prof_ticks(struct task_struct *p)
{
        return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        return p->utime;
}

int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = task_sched_runtime(p);
                break;
        }
        return 0;
}

void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct sighand_struct *sighand;
        struct signal_struct *sig;
        struct task_struct *t;

        *times = INIT_CPUTIME;

        rcu_read_lock();
        sighand = rcu_dereference(tsk->sighand);
        if (!sighand)
                goto out;

        sig = tsk->signal;

        t = tsk;
        do {
                times->utime = cputime_add(times->utime, t->utime);
                times->stime = cputime_add(times->stime, t->stime);
                times->sum_exec_runtime += t->se.sum_exec_runtime;

                t = next_thread(t);
        } while (t != tsk);

        times->utime = cputime_add(times->utime, sig->utime);
        times->stime = cputime_add(times->stime, sig->stime);
        times->sum_exec_runtime += sig->sum_sched_runtime;
out:
        rcu_read_unlock();
}

static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
        if (cputime_gt(b->utime, a->utime))
                a->utime = b->utime;

        if (cputime_gt(b->stime, a->stime))
                a->stime = b->stime;

        if (b->sum_exec_runtime > a->sum_exec_runtime)
                a->sum_exec_runtime = b->sum_exec_runtime;
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;
        unsigned long flags;

        spin_lock_irqsave(&cputimer->lock, flags);
        if (!cputimer->running) {
                cputimer->running = 1;
                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
                 * to synchronize the timer to the clock every time we start
                 * it.
                 */
                thread_group_cputime(tsk, &sum);
                update_gt_cputime(&cputimer->cputime, &sum);
        }
        *times = cputimer->cputime;
        spin_unlock_irqrestore(&cputimer->lock, flags);
}

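/*
 * Note that the O(nr_threads) summation in thread_group_cputime() is paid
 * only on the stopped-to-running transition above; while cputimer->running
 * is set, the scheduler tick accounting keeps cputimer->cputime current
 * incrementally, so repeated samples are just a locked structure copy.
 */
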
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                thread_group_cputime(p, &cputime);
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                thread_group_cputime(p, &cputime);
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = thread_group_sched_runtime(p);
                break;
        }
        return 0;
}

int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (same_thread_group(p, current)) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else {
                                read_lock(&tasklist_lock);
                                if (thread_group_leader(p) && p->signal) {
                                        error =
                                            cpu_clock_sample_group(which_clock,
                                                                   p, &rtn);
                                }
                                read_unlock(&tasklist_lock);
                        }
                }
                rcu_read_unlock();
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}

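/*
 * User-space view of this function (added illustration; a sketch, not part
 * of the original file):
 *
 *      struct timespec ts;
 *      clockid_t id;
 *
 *      clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);   // pid == 0 path
 *      clock_getcpuclockid(some_pid, &id);             // encodes pid != 0
 *      clock_gettime(id, &ts);                         // lookup path
 *
 * CLOCK_THREAD_CPUTIME_ID takes the CPUCLOCK_PERTHREAD branches instead.
 */
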
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
        new_timer->it.cpu.incr.sched = 0;
        new_timer->it.cpu.expires.sched = 0;

        read_lock(&tasklist_lock);
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !thread_group_leader(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}

/*
 * Clean out CPU timers still ticking when a thread exited. The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sum_exec_runtime)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);

        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         ptime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         utime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.sched < sum_exec_runtime) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sum_exec_runtime;
                }
        }
}

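/*
 * Example of the residual-time semantics above (added illustration): a
 * profiling timer set to expire at 7s of CPU time on a thread that dies
 * having consumed 5s is left with expires.cpu == 2s, which is exactly the
 * remaining time a later timer_gettime() should report for the dead clock.
 */
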
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped. When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers,
                       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        struct signal_struct *const sig = tsk->signal;

        cleanup_timers(tsk->signal->cpu_timers,
                       cputime_add(tsk->utime, sig->utime),
                       cputime_add(tsk->stime, sig->stime),
                       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
        return cputime_eq(expires, cputime_zero) ||
               cputime_gt(expires, new_exp);
}

static inline int expires_le(cputime_t expires, cputime_t new_exp)
{
        return !cputime_eq(expires, cputime_zero) &&
               cputime_le(expires, new_exp);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later. This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;
        unsigned long i;

        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);

        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);

        listpos = head;
        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                list_for_each_entry(next, head, entry) {
                        if (next->expires.sched > nt->expires.sched)
                                break;
                        listpos = &next->entry;
                }
        } else {
                list_for_each_entry(next, head, entry) {
                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                break;
                        listpos = &next->entry;
                }
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                /*
                 * We are the new earliest-expiring timer.
                 * If we are a thread timer, there can always
                 * be a process timer telling us to stop earlier.
                 */
                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                        union cpu_time_count *exp = &nt->expires;

                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
                                if (expires_gt(p->cputime_expires.prof_exp,
                                               exp->cpu))
                                        p->cputime_expires.prof_exp = exp->cpu;
                                break;
                        case CPUCLOCK_VIRT:
                                if (expires_gt(p->cputime_expires.virt_exp,
                                               exp->cpu))
                                        p->cputime_expires.virt_exp = exp->cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                if (p->cputime_expires.sched_exp == 0 ||
                                    p->cputime_expires.sched_exp > exp->sched)
                                        p->cputime_expires.sched_exp =
                                                exp->sched;
                                break;
                        }
                } else {
                        struct signal_struct *const sig = p->signal;
                        union cpu_time_count *exp = &timer->it.cpu.expires;

                        /*
                         * For a process timer, set the cached expiration time.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_VIRT:
                                if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
                                               exp->cpu))
                                        break;
                                sig->cputime_expires.virt_exp = exp->cpu;
                                break;
                        case CPUCLOCK_PROF:
                                if (expires_le(sig->it[CPUCLOCK_PROF].expires,
                                               exp->cpu))
                                        break;
                                i = sig->rlim[RLIMIT_CPU].rlim_cur;
                                if (i != RLIM_INFINITY &&
                                    i <= cputime_to_secs(exp->cpu))
                                        break;
                                sig->cputime_expires.prof_exp = exp->cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                sig->cputime_expires.sched_exp = exp->sched;
                                break;
                        }
                }
        }

        spin_unlock(&p->sighand->siglock);
}

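/*
 * The cputime_expires fields cached above are what fastpath_timer_check()
 * reads on every tick; keeping the earliest pending expiry there lets the
 * common no-timer-due case return without ever walking these lists.
 */
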
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer. Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer. But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        thread_group_cputimer(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                        struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->signal. If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->signal == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);
        spin_unlock(&p->sighand->siglock);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative. To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer). With an absolute time, we must
         * check if it's already passed. In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_timer_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already. If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer, val);
        }

        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.incr, &old->it_interval);
        }
        return ret;
}

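/*
 * User-space view of this function (added sketch, not part of the original
 * file): arming a 100ms-interval profiling timer on the process CPU clock
 * reaches this code via sys_timer_settime():
 *
 *      timer_t tid;
 *      struct sigevent ev = { .sigev_notify = SIGEV_SIGNAL,
 *                             .sigev_signo  = SIGPROF };
 *      struct itimerspec its = {
 *              .it_value    = { .tv_nsec = 100000000 },
 *              .it_interval = { .tv_nsec = 100000000 },
 *      };
 *      timer_create(CLOCK_PROCESS_CPUTIME_ID, &ev, &tid);
 *      timer_settime(tid, 0, &its, NULL);
 *
 * Without TIMER_ABSTIME the it_value is relative, hence the cpu_time_add()
 * of the freshly sampled clock value above.
 */
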
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all. */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
        dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                if (timer->it.cpu.incr.sched == 0 &&
                    cpu_time_before(timer->it_clock,
                                    timer->it.cpu.expires, now)) {
                        /*
                         * Do-nothing timer expired and has no reload,
                         * so it's as if it was never set.
                         */
                        timer->it.cpu.expires.sched = 0;
                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                        return;
                }
                /*
                 * Account for any expirations and reloads that should
                 * have happened.
                 */
                bump_cpu_timer(timer, now);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped. Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet. Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;

        maxfire = 20;
        tsk->cputime_expires.prof_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.prof_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.virt_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.virt_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.sched_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
                        tsk->cputime_expires.sched_exp = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case thread timers.
         */
        if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
                unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
                unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (sig->rlim[RLIMIT_RTTIME].rlim_cur
                            < sig->rlim[RLIMIT_RTTIME].rlim_max) {
                                sig->rlim[RLIMIT_RTTIME].rlim_cur +=
                                                                USEC_PER_SEC;
                        }
                        printk(KERN_INFO
                                "RT Watchdog Timeout: %s[%d]\n",
                                tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
}

static void stop_process_timers(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        unsigned long flags;

        if (!cputimer->running)
                return;

        spin_lock_irqsave(&cputimer->lock, flags);
        cputimer->running = 0;
        spin_unlock_irqrestore(&cputimer->lock, flags);
}

static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                             cputime_t *expires, cputime_t cur_time, int signo)
{
        if (cputime_eq(it->expires, cputime_zero))
                return;

        if (cputime_ge(cur_time, it->expires)) {
                if (!cputime_eq(it->incr, cputime_zero)) {
                        it->expires = cputime_add(it->expires, it->incr);
                        it->error += it->incr_error;
                        if (it->error >= onecputick) {
                                it->expires = cputime_sub(it->expires,
                                                          cputime_one_jiffy);
                                it->error -= onecputick;
                        }
                } else {
                        it->expires = cputime_zero;
                }

                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }

        if (!cputime_eq(it->expires, cputime_zero) &&
            (cputime_eq(*expires, cputime_zero) ||
             cputime_lt(it->expires, *expires))) {
                *expires = it->expires;
        }
}

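/*
 * Worked example of the error accumulation above (added illustration,
 * assuming the usual setup where the itimer code stores the interval
 * rounded up to whole cputime ticks in it->incr and the per-period
 * rounding excess, in nanoseconds, in it->incr_error): with HZ=1000 a
 * requested 2.5ms interval becomes incr = 3 jiffies, incr_error = 500000.
 * onecputick is 1000000, so every second period the accumulated error
 * reaches a full tick and one jiffy is handed back, making the timer fire
 * at 3, 5, 8, 10, ... jiffies, i.e. 2.5ms per period on average instead
 * of drifting half a millisecond late every time.
 */
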
/*
 * Check for any per-process (thread group) CPU timers that have fired and
 * move them off the tsk->*_timers list onto the firing list. Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;

        /*
         * Don't sample the current process CPU clocks if there are no timers.
         */
        if (list_empty(&timers[CPUCLOCK_PROF]) &&
            cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
            list_empty(&timers[CPUCLOCK_SCHED])) {
                stop_process_timers(tsk);
                return;
        }

        /*
         * Collect the current process totals.
         */
        thread_group_cputimer(tsk, &cputime);
        utime = cputime.utime;
        ptime = cputime_add(utime, cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;

        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
                        prof_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
                        virt_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
                        sched_expires = tl->expires.sched;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        /*
         * Check for the special case process timers.
         */
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);

        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                cputime_t x;

                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (sig->rlim[RLIMIT_CPU].rlim_cur
                            < sig->rlim[RLIMIT_CPU].rlim_max) {
                                sig->rlim[RLIMIT_CPU].rlim_cur++;
                        }
                }
                x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;
                }
        }

        if (!cputime_eq(prof_expires, cputime_zero) &&
            (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
             cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
                sig->cputime_expires.prof_exp = prof_expires;
        if (!cputime_eq(virt_expires, cputime_zero) &&
            (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
             cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
                sig->cputime_expires.virt_exp = virt_expires;
        if (sched_expires != 0 &&
            (sig->cputime_expires.sched_exp == 0 ||
             sig->cputime_expires.sched_exp > sched_expires))
                sig->cputime_expires.sched_exp = sched_expires;
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it. */
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
                        goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped. Take this opportunity to
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below. */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        arm_timer(timer, now);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:    The struct to compare.
 *
 * Checks @cputime to see if all fields are zero. Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (cputime_eq(cputime->utime, cputime_zero) &&
            cputime_eq(cputime->stime, cputime_zero) &&
            cputime->sum_exec_runtime == 0)
                return 1;
        return 0;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:     The task_cputime structure to be checked for expiration.
 * @expires:    Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set. Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                       const struct task_cputime *expires)
{
        if (!cputime_eq(expires->utime, cputime_zero) &&
            cputime_ge(sample->utime, expires->utime))
                return 1;
        if (!cputime_eq(expires->stime, cputime_zero) &&
            cputime_ge(cputime_add(sample->utime, sample->stime),
                       expires->stime))
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:        The task (thread) being checked.
 *
 * Check the task and thread group timers. If both are zero (there are no
 * timers set) return false. Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times. Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;

        /* tsk == current, ensure it is safe to use ->signal/sighand */
        if (unlikely(tsk->exit_state))
                return 0;

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample = {
                        .utime = tsk->utime,
                        .stime = tsk->stime,
                        .sum_exec_runtime = tsk->se.sum_exec_runtime
                };

                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        if (!task_cputime_zero(&sig->cputime_expires)) {
                struct task_cputime group_sample;

                thread_group_cputimer(tsk, &group_sample);
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }

        return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
}

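/*
 * This runs from the timer interrupt for every task, so the common case
 * must stay cheap: it reads only the cached expiry fields that arm_timer()
 * and the itimer code maintain, and samples the group clock through
 * thread_group_cputimer() only when a group-wide expiry is actually cached.
 */
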
/*
 * This is called from the timer interrupt handler. The irq handler has
 * already updated our counts. We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;

        BUG_ON(!irqs_disabled());

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers. If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        spin_lock(&tsk->sighand->siglock);
        /*
         * Here we take off tsk->signal->cpu_timers[N] and
         * tsk->cpu_timers[N] all the timers that are firing, and
         * put them on the firing list.
         */
        check_thread_timers(tsk, &firing);
        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list. We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        spin_unlock(&tsk->sighand->siglock);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us. We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int cpu_firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun. So don't generate an event.
                 */
                if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
}

/*
 * Set one of the process-wide special case CPU timers.
 * The tsk->sighand->siglock must be held by the caller.
 * The *newval argument is relative and we update it to be absolute, *oldval
 * is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;
        struct list_head *head;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);

        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
                                *oldval = cputime_one_jiffy;
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);

                /*
                 * If the RLIMIT_CPU timer will expire before the
                 * ITIMER_PROF timer, we have nothing else to do.
                 */
                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
                    < cputime_to_secs(*newval))
                        return;
        }

        /*
         * Check whether there are any process timers already set to fire
         * before this one. If so, we don't have anything more to do.
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
            cputime_ge(list_first_entry(head,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                switch (clock_idx) {
                case CPUCLOCK_PROF:
                        tsk->signal->cputime_expires.prof_exp = *newval;
                        break;
                case CPUCLOCK_VIRT:
                        tsk->signal->cputime_expires.virt_exp = *newval;
                        break;
                }
        }
}

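/*
 * Example caller from this file: update_rlimit_cpu() uses this with
 * clock_idx == CPUCLOCK_PROF and oldval == NULL when RLIMIT_CPU is
 * lowered. In that case *newval is already an absolute amount of CPU
 * time (the new limit), which is why the relative-to-absolute conversion
 * above is skipped together with the oldval handling.
 */
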
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, it);
                spin_unlock_irq(&timer.it_lock);

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}

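/*
 * Illustrative user-space trigger for this path (added sketch): a thread
 * that wants to block until its process has burned another 500ms of CPU
 * time (e.g. while other threads keep running) would call
 *
 *      struct timespec req = { .tv_sec = 0, .tv_nsec = 500000000 };
 *      clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, NULL);
 *
 * The temporary k_itimer above has timer.sigq == NULL, which is the
 * special case cpu_timer_fire() wakes directly instead of queueing a
 * signal.
 */
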
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                     struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
                &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;
        }
        return error;
}

long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->arg0;
        struct timespec __user *rmtp;
        struct timespec t;
        struct itimerspec it;
        int error;

        rmtp = (struct timespec __user *) restart_block->arg1;
        t.tv_sec = restart_block->arg2;
        t.tv_nsec = restart_block->arg3;

        restart_block->fn = do_no_restart_syscall;
        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = t.tv_sec;
                restart_block->arg3 = t.tv_nsec;
        }
        return error;
}

#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

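/*
 * For reference (as encoded by the CPUCLOCK_* macros in
 * <linux/posix-timers.h>): a CPU clockid packs the (inverted) PID into the
 * bits above the low three, bit 2 is the per-thread flag and bits 0-1 pick
 * the clock type, roughly
 *
 *      CPUCLOCK_PID(id)        == ~((id) >> 3)
 *      CPUCLOCK_PERTHREAD(id)  == (id) & 4
 *      CPUCLOCK_WHICH(id)      == (id) & 3     (PROF, VIRT or SCHED)
 *
 * so PROCESS_CLOCK and THREAD_CLOCK above are the pid == 0 ("this
 * process"/"this thread") encodings of the CPUCLOCK_SCHED clock.
 */
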
static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}

static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
                             struct timespec *rqtp, struct timespec __user *rmtp)
{
        return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres = process_cpu_clock_getres,
                .clock_get = process_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
                .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
                .clock_get = thread_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
                .nsleep_restart = thread_cpu_nsleep_restart,
        };
        struct timespec ts;

        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        cputime_to_timespec(cputime_one_jiffy, &ts);
        onecputick = ts.tv_nsec;
        WARN_ON(ts.tv_sec != 0);

        return 0;
}
__initcall(init_posix_cpu_timers);