ptrace: __ptrace_detach: do __wake_up_parent() if we reap the tracee
diff --git a/kernel/sched.c b/kernel/sched.c
index 0ac9053..0d0361b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5092,17 +5092,16 @@ void account_idle_time(cputime_t cputime)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-       cputime_t one_jiffy = jiffies_to_cputime(1);
-       cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();
 
        if (user_tick)
-               account_user_time(p, one_jiffy, one_jiffy_scaled);
+               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-               account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+               account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                    one_jiffy_scaled);
        else
-               account_idle_time(one_jiffy);
+               account_idle_time(cputime_one_jiffy);
 }
 
 /*
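
The first hunk replaces the per-tick jiffies_to_cputime(1) conversion with the precomputed cputime_one_jiffy. On architectures where that conversion folds to a constant expression the change is purely cosmetic; where it depends on boot-time calibration (CONFIG_VIRT_CPU_ACCOUNTING) it avoids redundant work on every tick. A minimal sketch of the two variants follows; the real definitions live in the architecture cputime headers, and the helper name below is only illustrative:

	/* Generic case: the conversion is already a constant expression. */
	#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	# define cputime_one_jiffy	jiffies_to_cputime(1)
	#else
	/*
	 * With virtual CPU accounting the jiffy-to-cputime ratio is only
	 * known after clock calibration, so cache it once at boot.
	 */
	extern cputime_t cputime_one_jiffy;

	static inline void setup_cputime_one_jiffy(void)
	{
		cputime_one_jiffy = jiffies_to_cputime(1);
	}
	#endif
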
@@ -10378,8 +10377,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 }
 
 static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                     struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
        if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10389,15 +10387,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
        if (tsk->sched_class != &fair_sched_class)
                return -EINVAL;
 #endif
+       return 0;
+}
 
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                     struct task_struct *tsk, bool threadgroup)
+{
+       int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+       if (retval)
+               return retval;
+       if (threadgroup) {
+               struct task_struct *c;
+               rcu_read_lock();
+               list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+                       retval = cpu_cgroup_can_attach_task(cgrp, c);
+                       if (retval) {
+                               rcu_read_unlock();
+                               return retval;
+                       }
+               }
+               rcu_read_unlock();
+       }
        return 0;
 }
 
 static void
 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                       struct cgroup *old_cont, struct task_struct *tsk)
+                 struct cgroup *old_cont, struct task_struct *tsk,
+                 bool threadgroup)
 {
        sched_move_task(tsk);
+       if (threadgroup) {
+               struct task_struct *c;
+               rcu_read_lock();
+               list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+                       sched_move_task(c);
+               }
+               rcu_read_unlock();
+       }
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
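
The new threadgroup flag on these callbacks matches a cgroup core change that lets an entire thread group be attached in one operation; when it is true, the subsystem must validate and move every thread in the group, not just the leader, which is why cpu_cgroup_can_attach is split into a per-task helper plus an RCU-protected walk of tsk->thread_group. The callback prototypes this file is adapting to look roughly as follows (a sketch inferred from the function signatures above, assuming the 2.6.32-era struct cgroup_subsys with unrelated members omitted):

	struct cgroup_subsys {
		/* ... other callbacks and fields ... */
		int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
				  struct task_struct *tsk, bool threadgroup);
		void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
			       struct cgroup *old_cgrp, struct task_struct *tsk,
			       bool threadgroup);
		/* ... */
	};

Keeping the RT-bandwidth and fair_sched_class checks in the per-task helper means the single-task and whole-group attach paths cannot drift apart.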