Merge git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6
diff --git a/kernel/sched.c b/kernel/sched.c
index 242d0d4..8e2558c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
        ktime_t now;
 
-       if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+       if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;
 
        if (hrtimer_active(&rt_b->rt_period_timer))
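The fix above is a De Morgan inversion of a buggy guard: the old test bailed out only when bandwidth enforcement was enabled AND the runtime was unlimited, so a system with RT bandwidth disabled still armed the period timer. The new test skips the timer whenever enforcement is off OR the budget is infinite, i.e. the timer is started only when enforcement is on and the budget is finite. A minimal standalone restatement of the corrected predicate (a sketch; the helper name is mine, though RUNTIME_INF is indeed an all-ones sentinel in this kernel):

    #include <stdbool.h>

    #define RUNTIME_INF (~0ULL)     /* sentinel: "unlimited runtime" */

    /* start_rt_bandwidth() only needs the period timer when
     * enforcement is active and the runtime budget is finite. */
    static bool needs_period_timer(bool bandwidth_enabled,
                                   unsigned long long runtime)
    {
            return bandwidth_enabled && runtime != RUNTIME_INF;
    }
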
@@ -2266,16 +2266,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
        if (!sched_feat(SYNC_WAKEUPS))
                sync = 0;
 
-       if (!sync) {
-               if (current->se.avg_overlap < sysctl_sched_migration_cost &&
-                         p->se.avg_overlap < sysctl_sched_migration_cost)
-                       sync = 1;
-       } else {
-               if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
-                         p->se.avg_overlap >= sysctl_sched_migration_cost)
-                       sync = 0;
-       }
-
 #ifdef CONFIG_SMP
        if (sched_feat(LB_WAKEUP_UPDATE)) {
                struct sched_domain *sd;
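Worth noting about the block deleted here: its two branches compute the same predicate. For !sync, sync became 1 when both overlaps were below the migration cost; for sync, it became 0 when either was at or above it, which by De Morgan is the identical "both below" test. So the heuristic always discarded the caller's hint and recomputed it from the overlap averages; removing it means the sync hint is honored as passed in, gated only by the SYNC_WAKEUPS feature above. The deleted logic, restated as a pure function (a sketch; parameter names are mine):

    /* Equivalent form of the removed heuristic: the incoming sync
     * hint was recomputed from the overlap averages alone. */
    static int overlap_sync_hint(unsigned long curr_overlap,
                                 unsigned long wakee_overlap,
                                 unsigned long migration_cost)
    {
            return curr_overlap < migration_cost &&
                   wakee_overlap < migration_cost;
    }
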
@@ -3890,19 +3880,24 @@ int select_nohz_load_balancer(int stop_tick)
        int cpu = smp_processor_id();
 
        if (stop_tick) {
-               cpumask_set_cpu(cpu, nohz.cpu_mask);
                cpu_rq(cpu)->in_nohz_recently = 1;
 
-               /*
-                * If we are going offline and still the leader, give up!
-                */
-               if (!cpu_active(cpu) &&
-                   atomic_read(&nohz.load_balancer) == cpu) {
+               if (!cpu_active(cpu)) {
+                       if (atomic_read(&nohz.load_balancer) != cpu)
+                               return 0;
+
+                       /*
+                        * If we are going offline and still the leader,
+                        * give up!
+                        */
                        if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
                                BUG();
+
                        return 0;
                }
 
+               cpumask_set_cpu(cpu, nohz.cpu_mask);
+
                /* time for ilb owner also to sleep */
                if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
                        if (atomic_read(&nohz.load_balancer) == cpu)
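Two fixes are folded into this hunk: the CPU's bit in nohz.cpu_mask is now set only on the active path, so a CPU headed offline no longer leaves a stale bit behind its early return, and the offline path first checks whether this CPU owns the idle load balancer at all before trying to resign. The resignation uses cmpxchg rather than a plain store so that the "we are still the owner" assumption is verified atomically at hand-off time; a failure means corrupted bookkeeping, hence BUG(). A self-contained sketch of that step with C11 atomics (illustrative; the kernel uses its own atomic_cmpxchg()):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <assert.h>

    static _Atomic int load_balancer = -1;  /* owning cpu, -1 if none */

    /* Only the recorded owner may clear ownership; the compare-
     * exchange doubles as a check that nobody raced us out of it. */
    static void resign_ownership(int cpu)
    {
            int expected = cpu;
            bool was_owner = atomic_compare_exchange_strong(&load_balancer,
                                                            &expected, -1);

            assert(was_owner);      /* mirrors the BUG() above */
    }
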
@@ -4697,8 +4692,8 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-                            int nr_exclusive, int sync, void *key)
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+                       int nr_exclusive, int sync, void *key)
 {
        wait_queue_t *curr, *next;
 
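Dropping static here gives __wake_up_common() external linkage so callers outside sched.c can reach it. That would normally be paired with a prototype in a shared header; something like the following (my assumption, the header change is not part of this diff):

    /* Hypothetical declaration for the newly exported function,
     * e.g. in include/linux/wait.h (not shown in this diff): */
    extern void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                                 int nr_exclusive, int sync, void *key);
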
@@ -6949,20 +6944,26 @@ static void free_rootdomain(struct root_domain *rd)
 
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
+       struct root_domain *old_rd = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&rq->lock, flags);
 
        if (rq->rd) {
-               struct root_domain *old_rd = rq->rd;
+               old_rd = rq->rd;
 
                if (cpumask_test_cpu(rq->cpu, old_rd->online))
                        set_rq_offline(rq);
 
                cpumask_clear_cpu(rq->cpu, old_rd->span);
 
-               if (atomic_dec_and_test(&old_rd->refcount))
-                       free_rootdomain(old_rd);
+               /*
+                * If we don't want to free the old_rd yet then
+                * set old_rd to NULL to skip the freeing later
+                * in this function:
+                */
+               if (!atomic_dec_and_test(&old_rd->refcount))
+                       old_rd = NULL;
        }
 
        atomic_inc(&rd->refcount);
@@ -6973,6 +6974,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
                set_rq_online(rq);
 
        spin_unlock_irqrestore(&rq->lock, flags);
+
+       if (old_rd)
+               free_rootdomain(old_rd);
 }
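These two hunks together defer the teardown: old_rd is hoisted out of the critical section, reset to NULL when the refcount does not hit zero, and the free_rootdomain() call moves past spin_unlock_irqrestore(), so the root domain is unlinked while rq->lock is held but reclaimed only after the irq-disabled critical section ends. The generic shape of the pattern, as a userspace sketch around a pthread mutex (types and names are illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; int key; };

    /* Unlink under the lock, free outside it: keeps the critical
     * section short and runs the destructor where blocking is safe. */
    static void detach_and_free(pthread_mutex_t *lock, struct node **head,
                                int key)
    {
            struct node *victim = NULL, **pp;

            pthread_mutex_lock(lock);
            for (pp = head; *pp; pp = &(*pp)->next) {
                    if ((*pp)->key == key) {
                            victim = *pp;
                            *pp = victim->next; /* unlink while locked */
                            break;
                    }
            }
            pthread_mutex_unlock(lock);

            free(victim);   /* reclaim after the lock is dropped */
    }
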
 
 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
@@ -9220,6 +9224,16 @@ static int sched_rt_global_constraints(void)
 
        return ret;
 }
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+       /* Don't accept realtime tasks when there is no way for them to run */
+       if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+               return 0;
+
+       return 1;
+}
+
 #else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
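The helper added above captures one invariant: a group whose rt_runtime budget is zero can never run realtime tasks, so letting one attach would strand it. A self-contained restatement of the check (structs trimmed to the fields the test touches; the SCHED_FIFO/SCHED_RR values match Linux's uapi, the rest is a sketch):

    #include <stdbool.h>

    #define SCHED_FIFO 1
    #define SCHED_RR   2

    struct rt_bandwidth { unsigned long long rt_runtime; };
    struct task_group   { struct rt_bandwidth rt_bandwidth; };
    struct task_struct  { int policy; };

    static bool rt_task(const struct task_struct *p)
    {
            return p->policy == SCHED_FIFO || p->policy == SCHED_RR;
    }

    /* Refuse realtime tasks when the group has no runtime budget. */
    int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
    {
            return !(rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0);
    }
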
@@ -9313,8 +9327,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                      struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
-       /* Don't accept realtime tasks when there is no way for them to run */
-       if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+       if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
                return -EINVAL;
 #else
        /* We don't support RT-tasks being in separate groups */
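With the check factored out, cpu_cgroup_can_attach() delegates instead of open-coding the test, and the non-static helper suggests a second call site elsewhere enforcing the same rule. A hypothetical caller, sketched on the assumption that task_group(p) resolves a task's current group:

    /* e.g. validating a policy change against the task's own group
     * (hypothetical call site, not part of this diff): */
    if (!sched_rt_can_attach(task_group(p), p))
            return -EPERM;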