diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 9d9169a..f1525ad 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -4,8 +4,9 @@
 
 #include <linux/sched.h>
 #include <linux/posix-timers.h>
-#include <asm/uaccess.h>
 #include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>
 
 static int check_clock(const clockid_t which_clock)
 {
@@ -20,9 +21,9 @@ static int check_clock(const clockid_t which_clock)
                return 0;
 
        read_lock(&tasklist_lock);
-       p = find_task_by_pid(pid);
-       if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
-                  p->tgid != current->tgid : p->tgid != pid)) {
+       p = find_task_by_vpid(pid);
+       if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
+                  same_thread_group(p, current) : thread_group_leader(p))) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);
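Pulled out of the ternary, the new validation reads as follows; a paraphrased sketch of the same logic, not the literal kernel code:

	/* A clock's target task must match the clock's flavor. */
	static int clock_target_ok(struct task_struct *p, const clockid_t which_clock)
	{
		if (!p)
			return 0;
		if (CPUCLOCK_PERTHREAD(which_clock))
			/* thread clock: target must share the caller's thread group */
			return same_thread_group(p, current);
		/* process clock: target must be a thread-group leader */
		return thread_group_leader(p);
	}

The old raw p->tgid comparisons were not pid-namespace aware; find_task_by_vpid() together with these helpers is.
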
@@ -47,12 +48,10 @@ static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
 {
-       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-               tp->tv_sec = div_long_long_rem(cpu.sched,
-                                              NSEC_PER_SEC, &tp->tv_nsec);
-       } else {
+       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+               *tp = ns_to_timespec(cpu.sched);
+       else
                cputime_to_timespec(cpu.cpu, tp);
-       }
 }
 
 static inline int cpu_time_before(const clockid_t which_clock,
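ns_to_timespec() replaces the open-coded div_long_long_rem(); given the new <linux/math64.h> include, it is equivalent to this sketch (the real helper in kernel/time.c also fixes up a negative remainder, which cannot arise for these monotonically increasing samples):

	struct timespec ts;
	s32 rem;

	ts.tv_sec = div_s64_rem(cpu.sched, NSEC_PER_SEC, &rem);
	ts.tv_nsec = rem;
	*tp = ts;
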
@@ -88,6 +87,19 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
 }
 
 /*
+ * Divide and limit the result to res >= 1
+ *
+ * This is necessary to prevent signal delivery starvation, when the result of
+ * the division would be rounded down to 0.
+ */
+static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
+{
+       cputime_t res = cputime_div(time, div);
+
+       return max_t(cputime_t, res, 1);
+}
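A worked example of the starvation the comment above describes, assuming the default jiffies-based cputime_t where cputime_div() is plain integer division (hypothetical numbers):

	cputime_div(3, 8);          /* 3 ticks left, 8 threads: result 0, so every
	                               thread's expiry becomes "now" and the timer
	                               refires immediately, over and over */
	cputime_div_non_zero(3, 8); /* result clamped to 1 tick of real slack */
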
+
+/*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
@@ -148,7 +160,7 @@ static inline cputime_t virt_ticks(struct task_struct *p)
 }
 static inline unsigned long long sched_ns(struct task_struct *p)
 {
-       return (p == current) ? current_sched_time(p) : p->sched_time;
+       return task_sched_runtime(p);
 }
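task_sched_runtime() (kernel/sched.c) subsumes the old p == current special case: it locks the task's runqueue and, if the task is running at that moment, adds the delta that has not yet been banked into sum_exec_runtime. Condensed sketch:

	rq = task_rq_lock(p, &flags);
	ns = p->se.sum_exec_runtime;
	if (rq->curr == p)                       /* running right now? */
		ns += rq->clock - p->se.exec_start;  /* add the unbanked delta */
	task_rq_unlock(rq, &flags);
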
 
 int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
@@ -233,10 +245,10 @@ static int cpu_clock_sample_group_locked(unsigned int clock_idx,
                } while (t != p);
                break;
        case CPUCLOCK_SCHED:
-               cpu->sched = p->signal->sched_time;
+               cpu->sched = p->signal->sum_sched_runtime;
                /* Add in each other live thread.  */
                while ((t = next_thread(t)) != p) {
-                       cpu->sched += t->sched_time;
+                       cpu->sched += t->se.sum_exec_runtime;
                }
                cpu->sched += sched_ns(p);
                break;
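So a process-wide CPUCLOCK_SCHED sample is now the runtime banked by already-exited threads plus each live thread's se.sum_exec_runtime; condensed sketch (the real code uses sched_ns() for p itself so the currently-running delta is included):

	u64 sample = p->signal->sum_sched_runtime;  /* exited threads */
	struct task_struct *t = p;
	do {
		sample += t->se.sum_exec_runtime;       /* each live thread */
		t = next_thread(t);
	} while (t != p);
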
@@ -291,20 +303,25 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
                 * should be able to see it.
                 */
                struct task_struct *p;
-               read_lock(&tasklist_lock);
-               p = find_task_by_pid(pid);
+               rcu_read_lock();
+               p = find_task_by_vpid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
-                               if (p->tgid == current->tgid) {
+                               if (same_thread_group(p, current)) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
-                       } else if (p->tgid == pid && p->signal) {
-                               error = cpu_clock_sample_group(which_clock,
-                                                              p, &rtn);
+                       } else {
+                               read_lock(&tasklist_lock);
+                               if (thread_group_leader(p) && p->signal) {
+                                       error =
+                                           cpu_clock_sample_group(which_clock,
+                                                                  p, &rtn);
+                               }
+                               read_unlock(&tasklist_lock);
                        }
                }
-               read_unlock(&tasklist_lock);
+               rcu_read_unlock();
        }
 
        if (error)
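The locking split is the point of this hunk: RCU alone pins the task_struct long enough to sample one thread's own accounting, while the process-wide sample still takes tasklist_lock for the thread-list walk. Reduced to its shape:

	rcu_read_lock();                   /* enough to keep p alive */
	p = find_task_by_vpid(pid);
	if (p && !CPUCLOCK_PERTHREAD(which_clock)) {
		read_lock(&tasklist_lock);     /* thread-list walk needs this */
		/* ... cpu_clock_sample_group() ... */
		read_unlock(&tasklist_lock);
	}
	rcu_read_unlock();
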
@@ -336,16 +353,16 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
                if (pid == 0) {
                        p = current;
                } else {
-                       p = find_task_by_pid(pid);
-                       if (p && p->tgid != current->tgid)
+                       p = find_task_by_vpid(pid);
+                       if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
-                       p = find_task_by_pid(pid);
-                       if (p && p->tgid != pid)
+                       p = find_task_by_vpid(pid);
+                       if (p && !thread_group_leader(p))
                                p = NULL;
                }
        }
@@ -404,7 +421,7 @@ int posix_cpu_timer_del(struct k_itimer *timer)
  */
 static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
-                          unsigned long long sched_time)
+                          unsigned long long sum_exec_runtime)
 {
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);
@@ -433,10 +450,10 @@ static void cleanup_timers(struct list_head *head,
        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
-               if (timer->expires.sched < sched_time) {
+               if (timer->expires.sched < sum_exec_runtime) {
                        timer->expires.sched = 0;
                } else {
-                       timer->expires.sched -= sched_time;
+                       timer->expires.sched -= sum_exec_runtime;
                }
        }
 }
@@ -449,7 +466,7 @@ static void cleanup_timers(struct list_head *head,
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
        cleanup_timers(tsk->cpu_timers,
-                      tsk->utime, tsk->stime, tsk->sched_time);
+                      tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
 
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
@@ -457,7 +474,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime_add(tsk->utime, tsk->signal->utime),
                       cputime_add(tsk->stime, tsk->signal->stime),
-                      tsk->sched_time + tsk->signal->sched_time);
+                    tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
 }
 
 
@@ -483,8 +500,8 @@ static void process_timer_rebalance(struct task_struct *p,
                BUG();
                break;
        case CPUCLOCK_PROF:
-               left = cputime_div(cputime_sub(expires.cpu, val.cpu),
-                                  nthreads);
+               left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
+                                      nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(prof_ticks(t), left);
@@ -498,8 +515,8 @@ static void process_timer_rebalance(struct task_struct *p,
                } while (t != p);
                break;
        case CPUCLOCK_VIRT:
-               left = cputime_div(cputime_sub(expires.cpu, val.cpu),
-                                  nthreads);
+               left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
+                                      nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(virt_ticks(t), left);
@@ -515,9 +532,10 @@ static void process_timer_rebalance(struct task_struct *p,
        case CPUCLOCK_SCHED:
                nsleft = expires.sched - val.sched;
                do_div(nsleft, nthreads);
+               nsleft = max_t(unsigned long long, nsleft, 1);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
-                               ns = t->sched_time + nsleft;
+                               ns = t->se.sum_exec_runtime + nsleft;
                                if (t->it_sched_expires == 0 ||
                                    t->it_sched_expires > ns) {
                                        t->it_sched_expires = ns;
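do_div() divides its u64 first argument in place and evaluates to the remainder, so the clamp must follow as a separate statement; it mirrors cputime_div_non_zero() for this nanosecond-based clock. With hypothetical values:

	u64 nsleft = 7;                /* 7 ns of slack left           */
	u32 rem = do_div(nsleft, 8);   /* 8 threads: nsleft==0, rem==7 */
	nsleft = max_t(unsigned long long, nsleft, 1);  /* avoid zero  */
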
@@ -555,9 +573,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
        struct cpu_timer_list *next;
        unsigned long i;
 
-       if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
-               return;
-
        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);
@@ -951,11 +966,12 @@ static void check_thread_timers(struct task_struct *tsk,
 {
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
+       struct signal_struct *const sig = tsk->signal;
 
        maxfire = 20;
        tsk->it_prof_expires = cputime_zero;
        while (!list_empty(timers)) {
-               struct cpu_timer_list *t = list_entry(timers->next,
+               struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
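The list_entry(timers->next, ...) to list_first_entry(timers, ...) conversions here and below change no behavior; the helper in include/linux/list.h is just:

	#define list_first_entry(ptr, type, member) \
		list_entry((ptr)->next, type, member)
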
@@ -970,7 +986,7 @@ static void check_thread_timers(struct task_struct *tsk,
        maxfire = 20;
        tsk->it_virt_expires = cputime_zero;
        while (!list_empty(timers)) {
-               struct cpu_timer_list *t = list_entry(timers->next,
+               struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
@@ -985,16 +1001,45 @@ static void check_thread_timers(struct task_struct *tsk,
        maxfire = 20;
        tsk->it_sched_expires = 0;
        while (!list_empty(timers)) {
-               struct cpu_timer_list *t = list_entry(timers->next,
+               struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
-               if (!--maxfire || tsk->sched_time < t->expires.sched) {
+               if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
                        tsk->it_sched_expires = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }
+
+       /*
+        * Check for the special case thread timers.
+        */
+       if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
+               unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
+               unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
+
+               if (hard != RLIM_INFINITY &&
+                   tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+                       /*
+                        * At the hard limit, we just die.
+                        * No need to calculate anything else now.
+                        */
+                       __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+                       return;
+               }
+               if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+                       /*
+                        * At the soft limit, send a SIGXCPU every second.
+                        */
+                       if (sig->rlim[RLIMIT_RTTIME].rlim_cur
+                           < sig->rlim[RLIMIT_RTTIME].rlim_max) {
+                               sig->rlim[RLIMIT_RTTIME].rlim_cur +=
+                                                               USEC_PER_SEC;
+                       }
+                       __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+               }
+       }
 }
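For context: RLIMIT_RTTIME caps the CPU time a realtime task may consume without sleeping, in microseconds. Past the soft limit the code above delivers SIGXCPU once per second, advancing rlim_cur by USEC_PER_SEC each time; past the hard limit it delivers SIGKILL. A minimal userspace sketch, assuming a libc that exposes RLIMIT_RTTIME:

	#include <sys/resource.h>
	#include <stdio.h>

	int main(void)
	{
		struct rlimit rl = {
			.rlim_cur = 500000,   /* soft: 0.5s of RT CPU -> SIGXCPU */
			.rlim_max = 2000000,  /* hard: 2s of RT CPU -> SIGKILL   */
		};

		if (setrlimit(RLIMIT_RTTIME, &rl))
			perror("setrlimit");
		return 0;
	}
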
 
 /*
@@ -1008,7 +1053,7 @@ static void check_process_timers(struct task_struct *tsk,
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, stime, ptime, virt_expires, prof_expires;
-       unsigned long long sched_time, sched_expires;
+       unsigned long long sum_sched_runtime, sched_expires;
        struct task_struct *t;
        struct list_head *timers = sig->cpu_timers;
 
@@ -1028,12 +1073,12 @@ static void check_process_timers(struct task_struct *tsk,
         */
        utime = sig->utime;
        stime = sig->stime;
-       sched_time = sig->sched_time;
+       sum_sched_runtime = sig->sum_sched_runtime;
        t = tsk;
        do {
                utime = cputime_add(utime, t->utime);
                stime = cputime_add(stime, t->stime);
-               sched_time += t->sched_time;
+               sum_sched_runtime += t->se.sum_exec_runtime;
                t = next_thread(t);
        } while (t != tsk);
        ptime = cputime_add(utime, stime);
@@ -1041,45 +1086,45 @@ static void check_process_timers(struct task_struct *tsk,
        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
-               struct cpu_timer_list *t = list_entry(timers->next,
+               struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
-               if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
-                       prof_expires = t->expires.cpu;
+               if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
+                       prof_expires = tl->expires.cpu;
                        break;
                }
-               t->firing = 1;
-               list_move_tail(&t->entry, firing);
+               tl->firing = 1;
+               list_move_tail(&tl->entry, firing);
        }
 
        ++timers;
        maxfire = 20;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
-               struct cpu_timer_list *t = list_entry(timers->next,
+               struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
-               if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
-                       virt_expires = t->expires.cpu;
+               if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
+                       virt_expires = tl->expires.cpu;
                        break;
                }
-               t->firing = 1;
-               list_move_tail(&t->entry, firing);
+               tl->firing = 1;
+               list_move_tail(&tl->entry, firing);
        }
 
        ++timers;
        maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
-               struct cpu_timer_list *t = list_entry(timers->next,
+               struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
-               if (!--maxfire || sched_time < t->expires.sched) {
-                       sched_expires = t->expires.sched;
+               if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
+                       sched_expires = tl->expires.sched;
                        break;
                }
-               t->firing = 1;
-               list_move_tail(&t->entry, firing);
+               tl->firing = 1;
+               list_move_tail(&tl->entry, firing);
        }
 
        /*
@@ -1162,12 +1207,13 @@ static void check_process_timers(struct task_struct *tsk,
 
                prof_left = cputime_sub(prof_expires, utime);
                prof_left = cputime_sub(prof_left, stime);
-               prof_left = cputime_div(prof_left, nthreads);
+               prof_left = cputime_div_non_zero(prof_left, nthreads);
                virt_left = cputime_sub(virt_expires, utime);
-               virt_left = cputime_div(virt_left, nthreads);
+               virt_left = cputime_div_non_zero(virt_left, nthreads);
                if (sched_expires) {
-                       sched_left = sched_expires - sched_time;
+                       sched_left = sched_expires - sum_sched_runtime;
                        do_div(sched_left, nthreads);
+                       sched_left = max_t(unsigned long long, sched_left, 1);
                } else {
                        sched_left = 0;
                }
@@ -1191,7 +1237,7 @@ static void check_process_timers(struct task_struct *tsk,
                                t->it_virt_expires = ticks;
                        }
 
-                       sched = t->sched_time + sched_left;
+                       sched = t->se.sum_exec_runtime + sched_left;
                        if (sched_expires && (t->it_sched_expires == 0 ||
                                              t->it_sched_expires > sched)) {
                                t->it_sched_expires = sched;
@@ -1283,35 +1329,35 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
        if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
            (tsk->it_sched_expires == 0 ||
-            tsk->sched_time < tsk->it_sched_expires))
+            tsk->se.sum_exec_runtime < tsk->it_sched_expires))
                return;
 
 #undef UNEXPIRED
 
-       BUG_ON(tsk->exit_state);
-
        /*
         * Double-check with locks held.
         */
        read_lock(&tasklist_lock);
-       spin_lock(&tsk->sighand->siglock);
+       if (likely(tsk->signal != NULL)) {
+               spin_lock(&tsk->sighand->siglock);
 
-       /*
-        * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
-        * all the timers that are firing, and put them on the firing list.
-        */
-       check_thread_timers(tsk, &firing);
-       check_process_timers(tsk, &firing);
+               /*
+                * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
+                * all the timers that are firing, and put them on the firing list.
+                */
+               check_thread_timers(tsk, &firing);
+               check_process_timers(tsk, &firing);
 
-       /*
-        * We must release these locks before taking any timer's lock.
-        * There is a potential race with timer deletion here, as the
-        * siglock now protects our private firing list.  We have set
-        * the firing flag in each timer, so that a deletion attempt
-        * that gets the timer lock before we do will give it up and
-        * spin until we've taken care of that timer below.
-        */
-       spin_unlock(&tsk->sighand->siglock);
+               /*
+                * We must release these locks before taking any timer's lock.
+                * There is a potential race with timer deletion here, as the
+                * siglock now protects our private firing list.  We have set
+                * the firing flag in each timer, so that a deletion attempt
+                * that gets the timer lock before we do will give it up and
+                * spin until we've taken care of that timer below.
+                */
+               spin_unlock(&tsk->sighand->siglock);
+       }
        read_unlock(&tasklist_lock);
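The deleted BUG_ON(tsk->exit_state) and the new tsk->signal test fix the same race: the tick can reach run_posix_cpu_timers() for a task that release_task() is already tearing down, at which point tsk->signal is NULL. Since release_task() takes tasklist_lock for writing, the test is stable inside this critical section:

	read_lock(&tasklist_lock);
	if (likely(tsk->signal != NULL)) {
		/* tsk->sighand and the timer lists are safe to touch */
	}
	read_unlock(&tasklist_lock);
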
 
        /*
@@ -1383,7 +1429,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
-           cputime_ge(list_entry(head->next,
+           cputime_ge(list_first_entry(head,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                /*
@@ -1396,25 +1442,13 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
        }
 }
 
-static long posix_cpu_clock_nanosleep_restart(struct restart_block *);
-
-int posix_cpu_nsleep(const clockid_t which_clock, int flags,
-                    struct timespec *rqtp, struct timespec __user *rmtp)
+static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+                           struct timespec *rqtp, struct itimerspec *it)
 {
-       struct restart_block *restart_block =
-           &current_thread_info()->restart_block;
        struct k_itimer timer;
        int error;
 
        /*
-        * Diagnose required errors first.
-        */
-       if (CPUCLOCK_PERTHREAD(which_clock) &&
-           (CPUCLOCK_PID(which_clock) == 0 ||
-            CPUCLOCK_PID(which_clock) == current->pid))
-               return -EINVAL;
-
-       /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
@@ -1425,11 +1459,12 @@ int posix_cpu_nsleep(const clockid_t which_clock, int flags,
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;
-               struct itimerspec it = { .it_value = *rqtp,
-                                        .it_interval = {} };
+
+               memset(it, 0, sizeof *it);
+               it->it_value = *rqtp;
 
                spin_lock_irq(&timer.it_lock);
-               error = posix_cpu_timer_set(&timer, flags, &it, NULL);
+               error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
@@ -1457,49 +1492,89 @@ int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
-               posix_cpu_timer_set(&timer, 0, &zero_it, &it);
+               posix_cpu_timer_set(&timer, 0, &zero_it, it);
                spin_unlock_irq(&timer.it_lock);
 
-               if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
+               if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }
 
+               error = -ERESTART_RESTARTBLOCK;
+       }
+
+       return error;
+}
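Factoring out do_cpu_nanosleep() lets the first attempt and the restart path share the arm-and-wait logic. The flow, sketched:

	/*
	 * sys_clock_nanosleep()
	 *   -> posix_cpu_nsleep()          first attempt
	 *        -> do_cpu_nanosleep()     arm a temporary timer, sleep
	 *   signal arrives: -ERESTART_RESTARTBLOCK; the absolute expiry
	 *   has been written back into rqtp by sample_to_timespec()
	 *
	 * syscall restarted on return to userspace:
	 *   -> posix_cpu_nsleep_restart()
	 *        -> do_cpu_nanosleep(TIMER_ABSTIME, saved expiry)
	 *   so CPU time burned before the signal still counts
	 */
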
+
+int posix_cpu_nsleep(const clockid_t which_clock, int flags,
+                    struct timespec *rqtp, struct timespec __user *rmtp)
+{
+       struct restart_block *restart_block =
+           &current_thread_info()->restart_block;
+       struct itimerspec it;
+       int error;
+
+       /*
+        * Diagnose required errors first.
+        */
+       if (CPUCLOCK_PERTHREAD(which_clock) &&
+           (CPUCLOCK_PID(which_clock) == 0 ||
+            CPUCLOCK_PID(which_clock) == current->pid))
+               return -EINVAL;
+
+       error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
+
+       if (error == -ERESTART_RESTARTBLOCK) {
+
+               if (flags & TIMER_ABSTIME)
+                       return -ERESTARTNOHAND;
                /*
-                * Report back to the user the time still remaining.
-                */
-               if (rmtp != NULL && !(flags & TIMER_ABSTIME) &&
-                   copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
+                * Report back to the user the time still remaining.
+                */
+               if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;
 
-               restart_block->fn = posix_cpu_clock_nanosleep_restart;
-               /* Caller already set restart_block->arg1 */
+               restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;
-
-               error = -ERESTART_RESTARTBLOCK;
        }
-
        return error;
 }
 
-static long
-posix_cpu_clock_nanosleep_restart(struct restart_block *restart_block)
+long posix_cpu_nsleep_restart(struct restart_block *restart_block)
 {
        clockid_t which_clock = restart_block->arg0;
        struct timespec __user *rmtp;
        struct timespec t;
+       struct itimerspec it;
+       int error;
 
        rmtp = (struct timespec __user *) restart_block->arg1;
        t.tv_sec = restart_block->arg2;
        t.tv_nsec = restart_block->arg3;
 
        restart_block->fn = do_no_restart_syscall;
-       return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t, rmtp);
+       error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
+
+       if (error == -ERESTART_RESTARTBLOCK) {
+               /*
+                * Report back to the user the time still remaining.
+                */
+               if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
+                       return -EFAULT;
+
+               restart_block->fn = posix_cpu_nsleep_restart;
+               restart_block->arg0 = which_clock;
+               restart_block->arg1 = (unsigned long) rmtp;
+               restart_block->arg2 = t.tv_sec;
+               restart_block->arg3 = t.tv_nsec;
+       }
+       return error;
+
 }
 
 
@@ -1527,6 +1602,10 @@ static int process_cpu_nsleep(const clockid_t which_clock, int flags,
 {
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
 }
+static long process_cpu_nsleep_restart(struct restart_block *restart_block)
+{
+       return -EINVAL;
+}
 static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
 {
@@ -1547,6 +1626,10 @@ static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
 {
        return -EINVAL;
 }
+static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
+{
+       return -EINVAL;
+}
 
 static __init int init_posix_cpu_timers(void)
 {
@@ -1556,6 +1639,7 @@ static __init int init_posix_cpu_timers(void)
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
+               .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
@@ -1563,6 +1647,7 @@ static __init int init_posix_cpu_timers(void)
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
+               .nsleep_restart = thread_cpu_nsleep_restart,
        };
 
        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
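These two k_clock structures back the POSIX CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID clocks; note that nanosleep on the thread clock is deliberately -EINVAL, as POSIX permits. A userspace sketch (older glibc needs -lrt):

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec p, t;

		clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &p); /* all threads */
		clock_gettime(CLOCK_THREAD_CPUTIME_ID, &t);  /* caller only */
		printf("process %ld.%09ld  thread %ld.%09ld\n",
		       (long)p.tv_sec, p.tv_nsec, (long)t.tv_sec, t.tv_nsec);
		return 0;
	}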