Merge branch 'linux-2.6'
diff --git a/kernel/sched.c b/kernel/sched.c
index bb2c699..3aaa5c8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1127,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
@@ -1182,6 +1183,7 @@ static void init_hrtick(void)
 {
        hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -4396,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
                             signal_pending(current)) ||
                            (state == TASK_KILLABLE &&
                             fatal_signal_pending(current))) {
-                               __remove_wait_queue(&x->wait, &wait);
-                               return -ERESTARTSYS;
+                               timeout = -ERESTARTSYS;
+                               break;
                        }
                        __set_current_state(state);
                        spin_unlock_irq(&x->wait.lock);
                        timeout = schedule_timeout(timeout);
                        spin_lock_irq(&x->wait.lock);
-                       if (!timeout) {
-                               __remove_wait_queue(&x->wait, &wait);
-                               return timeout;
-                       }
-               } while (!x->done);
+               } while (!x->done && timeout);
                __remove_wait_queue(&x->wait, &wait);
+               if (!x->done)
+                       return timeout;
        }
        x->done--;
-       return timeout;
+       return timeout ?: 1;
 }
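The completion-wait rewrite above replaces two early returns (each doing its
own __remove_wait_queue()) with a single exit path: a signal now just sets
timeout = -ERESTARTSYS and breaks, timeout expiry is folded into the loop
condition, and the wait queue entry is removed exactly once. The subtle line
is "return timeout ?: 1" -- GNU C's binary conditional, equivalent to
timeout ? timeout : 1. A wait can complete on the same tick that
schedule_timeout() returns 0, and returning 0 there would read as a timeout
to the caller, so the completed case is forced to report at least 1 jiffy
remaining. A self-contained userspace sketch of that return convention;
wait_result() is a hypothetical stand-in for the function's tail, not
kernel code:

#include <assert.h>

static long wait_result(long timeout, int done)
{
	if (!done)
		return timeout;		/* 0 = timed out, negative = interrupted */
	return timeout ?: 1;		/* completed: never report 0 */
}

int main(void)
{
	assert(wait_result(25, 1) == 25);	/* completed with time to spare */
	assert(wait_result(0, 1) == 1);		/* completed as the timer hit zero */
	assert(wait_result(0, 0) == 0);		/* plain timeout */
	return 0;
}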
 
 static long __sched
@@ -7241,6 +7241,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+       ndoms_cur = 0;
+       if (doms_cur != &fallback_doms)
+               kfree(doms_cur);
+       doms_cur = &fallback_doms;
+}
+
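free_sched_domains() resets the partitioning bookkeeping to the statically
allocated fallback_doms, and the doms_cur != &fallback_doms check is the
whole point: doms_cur may legitimately point at that static object, which
must never be handed to kfree(). Both detach sites below
(arch_reinit_sched_domains() and the CPU_DOWN_PREPARE notifier case) now
call it right after detach_destroy_domains(), so stale domain masks cannot
leak or be reused on the rebuild path. A userspace analogue of the
ownership rule, with illustrative names only:

#include <stdlib.h>

static int fallback[1];			/* static fallback, never freed */
static int *cur = fallback;

static void reset_to_fallback(void)
{
	if (cur != fallback)		/* only heap allocations reach free() */
		free(cur);
	cur = fallback;
}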
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7387,6 +7399,7 @@ int arch_reinit_sched_domains(void)
        get_online_cpus();
        mutex_lock(&sched_domains_mutex);
        detach_destroy_domains(&cpu_online_map);
+       free_sched_domains();
        err = arch_init_sched_domains(&cpu_online_map);
        mutex_unlock(&sched_domains_mutex);
        put_online_cpus();
@@ -7472,6 +7485,7 @@ static int update_sched_domains(struct notifier_block *nfb,
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                detach_destroy_domains(&cpu_online_map);
+               free_sched_domains();
                return NOTIFY_OK;
 
        case CPU_UP_CANCELED:
@@ -7490,8 +7504,16 @@ static int update_sched_domains(struct notifier_block *nfb,
                return NOTIFY_DONE;
        }
 
+#ifndef CONFIG_CPUSETS
+       /*
+        * Create default domain partitioning if cpusets are disabled.
+        * Otherwise we let cpusets rebuild the domains based on the
+        * current setup.
+        */
+
        /* The hotplug lock is already held by cpu_up/cpu_down */
        arch_init_sched_domains(&cpu_online_map);
+#endif
 
        return NOTIFY_OK;
 }
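As the new comment in the hunk says: with CONFIG_CPUSETS enabled the cpuset
code owns domain partitioning and rebuilds the domains itself from its own
hotplug handling, so calling arch_init_sched_domains() here would stomp on
that partitioning with the default single-domain setup. Only the !CPUSETS
configuration needs the scheduler to restore the default partition, hence
the #ifndef guard around the rebuild.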
@@ -7631,7 +7653,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
        else
                rt_se->rt_rq = parent->my_q;
 
-       rt_se->rt_rq = &rq->rt;
        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
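The one-line deletion above is a plain bug fix: the removed store executed
unconditionally right after the if/else preceding it, so every rt scheduling
entity ended up pointing at the root rt_rq and the parent->my_q branch was
dead code. Reduced to its shape, shown straight rather than as a diff:

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;
	rt_se->rt_rq = &rq->rt;	/* clobbered both branches; now removed */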
@@ -8353,7 +8374,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-       struct task_group *tgi, *parent = tg->parent;
+       struct task_group *tgi, *parent = tg ? tg->parent : NULL;
        unsigned long total = 0;
 
        if (!parent) {
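__rt_schedulable() can now tolerate tg == NULL (presumably so callers can
validate bandwidth without naming a specific group; the caller is not shown
in this hunk). Reading tg->parent unconditionally would oops on a NULL tg,
so the parent lookup becomes NULL-safe and the existing !parent branch below
handles the rest. The pattern in isolation, illustrative rather than kernel
code:

#include <stddef.h>

struct node { struct node *parent; };

static struct node *parent_of(struct node *n)
{
	return n ? n->parent : NULL;	/* NULL in, NULL out */
}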