[PATCH] mm: __alloc_pages cleanup
diff --git a/kernel/sched.c b/kernel/sched.c
index e75b301..b650667 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -206,6 +206,7 @@ struct runqueue {
         */
        unsigned long nr_running;
 #ifdef CONFIG_SMP
+       unsigned long prio_bias;
        unsigned long cpu_load[3];
 #endif
        unsigned long long nr_switches;
@@ -260,8 +261,15 @@ struct runqueue {
 
 static DEFINE_PER_CPU(struct runqueue, runqueues);
 
+/*
+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
+ * See detach_destroy_domains: synchronize_sched for details.
+ *
+ * The domain tree of any CPU may only be accessed from within
+ * preempt-disabled sections.
+ */
 #define for_each_domain(cpu, domain) \
-       for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent)
+for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent)
 
 #define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
 #define this_rq()              (&__get_cpu_var(runqueues))
@@ -287,6 +295,10 @@ static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
 
 static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
 {
+#ifdef CONFIG_DEBUG_SPINLOCK
+       /* this is a valid case when another task releases the spinlock */
+       rq->lock.owner = current;
+#endif
        spin_unlock_irq(&rq->lock);
 }
 
@@ -395,6 +407,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 
 #ifdef CONFIG_SMP
                /* domain-specific stats */
+               preempt_disable();
                for_each_domain(cpu, sd) {
                        enum idle_type itype;
                        char mask_str[NR_CPUS];
@@ -419,6 +432,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
                            sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
                            sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
                }
+               preempt_enable();
 #endif
        }
        return 0;
@@ -646,13 +660,68 @@ static int effective_prio(task_t *p)
        return prio;
 }
 
+#ifdef CONFIG_SMP
+static inline void inc_prio_bias(runqueue_t *rq, int prio)
+{
+       rq->prio_bias += MAX_PRIO - prio;
+}
+
+static inline void dec_prio_bias(runqueue_t *rq, int prio)
+{
+       rq->prio_bias -= MAX_PRIO - prio;
+}
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+       rq->nr_running++;
+       if (rt_task(p)) {
+               if (p != rq->migration_thread)
+                       /*
+                        * The migration thread does the actual balancing. Do
+                        * not bias by its priority as the ultra high priority
+                        * will skew balancing adversely.
+                        */
+                       inc_prio_bias(rq, p->prio);
+       } else
+               inc_prio_bias(rq, p->static_prio);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+       rq->nr_running--;
+       if (rt_task(p)) {
+               if (p != rq->migration_thread)
+                       dec_prio_bias(rq, p->prio);
+       } else
+               dec_prio_bias(rq, p->static_prio);
+}
+#else
+static inline void inc_prio_bias(runqueue_t *rq, int prio)
+{
+}
+
+static inline void dec_prio_bias(runqueue_t *rq, int prio)
+{
+}
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+       rq->nr_running++;
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+       rq->nr_running--;
+}
+#endif
+
 /*
  * __activate_task - move a task to the runqueue.
  */
 static inline void __activate_task(task_t *p, runqueue_t *rq)
 {
        enqueue_task(p, rq->active);
-       rq->nr_running++;
+       inc_nr_running(p, rq);
 }
 
 /*
@@ -661,10 +730,10 @@ static inline void __activate_task(task_t *p, runqueue_t *rq)
 static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
 {
        enqueue_task_head(p, rq->active);
-       rq->nr_running++;
+       inc_nr_running(p, rq);
 }
 
-static void recalc_task_prio(task_t *p, unsigned long long now)
+static int recalc_task_prio(task_t *p, unsigned long long now)
 {
        /* Caller must always ensure 'now >= p->timestamp' */
        unsigned long long __sleep_time = now - p->timestamp;
@@ -723,7 +792,7 @@ static void recalc_task_prio(task_t *p, unsigned long long now)
                }
        }
 
-       p->prio = effective_prio(p);
+       return effective_prio(p);
 }
 
 /*
@@ -746,7 +815,8 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
        }
 #endif
 
-       recalc_task_prio(p, now);
+       if (!rt_task(p))
+               p->prio = recalc_task_prio(p, now);
 
        /*
         * This checks to make sure it's not an uninterruptible task
@@ -780,7 +850,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
  */
 static void deactivate_task(struct task_struct *p, runqueue_t *rq)
 {
-       rq->nr_running--;
+       dec_nr_running(p, rq);
        dequeue_task(p, p->array);
        p->array = NULL;
 }
@@ -795,21 +865,28 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
 #ifdef CONFIG_SMP
 static void resched_task(task_t *p)
 {
-       int need_resched, nrpolling;
+       int cpu;
 
        assert_spin_locked(&task_rq(p)->lock);
 
-       /* minimise the chance of sending an interrupt to poll_idle() */
-       nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
-       need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
-       nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
+       if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+               return;
+
+       set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+
+       cpu = task_cpu(p);
+       if (cpu == smp_processor_id())
+               return;
 
-       if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
-               smp_send_reschedule(task_cpu(p));
+       /* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+       smp_mb();
+       if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+               smp_send_reschedule(cpu);
 }
 #else
 static inline void resched_task(task_t *p)
 {
+       assert_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
 }
 #endif
@@ -824,22 +901,12 @@ inline int task_curr(const task_t *p)
 }
 
 #ifdef CONFIG_SMP
-enum request_type {
-       REQ_MOVE_TASK,
-       REQ_SET_DOMAIN,
-};
-
 typedef struct {
        struct list_head list;
-       enum request_type type;
 
-       /* For REQ_MOVE_TASK */
        task_t *task;
        int dest_cpu;
 
-       /* For REQ_SET_DOMAIN */
-       struct sched_domain *sd;
-
        struct completion done;
 } migration_req_t;
 
@@ -861,7 +928,6 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
        }
 
        init_completion(&req->done);
-       req->type = REQ_MOVE_TASK;
        req->task = p;
        req->dest_cpu = dest_cpu;
        list_add(&req->list, &rq->migration_queue);
@@ -877,7 +943,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(task_t * p)
+void wait_task_inactive(task_t *p)
 {
        unsigned long flags;
        runqueue_t *rq;
@@ -928,27 +994,61 @@ void kick_process(task_t *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long source_load(int cpu, int type)
+static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
        runqueue_t *rq = cpu_rq(cpu);
-       unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+       unsigned long running = rq->nr_running;
+       unsigned long source_load, cpu_load = rq->cpu_load[type-1],
+               load_now = running * SCHED_LOAD_SCALE;
+
        if (type == 0)
-               return load_now;
+               source_load = load_now;
+       else
+               source_load = min(cpu_load, load_now);
+
+       if (running > 1 || (idle == NOT_IDLE && running))
+               /*
+                * If we are busy rebalancing the load is biased by
+                * priority to create 'nice' support across cpus. When
+                * idle rebalancing we should only bias the source_load if
+                * there is more than one task running on that queue to
+                * prevent idle rebalance from trying to pull tasks from a
+                * queue with only one running task.
+                */
+               source_load = source_load * rq->prio_bias / running;
 
-       return min(rq->cpu_load[type-1], load_now);
+       return source_load;
+}
+
+static inline unsigned long source_load(int cpu, int type)
+{
+       return __source_load(cpu, type, NOT_IDLE);
 }
 
 /*
  * Return a high guess at the load of a migration-target cpu
  */
-static inline unsigned long target_load(int cpu, int type)
+static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
 {
        runqueue_t *rq = cpu_rq(cpu);
-       unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+       unsigned long running = rq->nr_running;
+       unsigned long target_load, cpu_load = rq->cpu_load[type-1],
+               load_now = running * SCHED_LOAD_SCALE;
+
        if (type == 0)
-               return load_now;
+               target_load = load_now;
+       else
+               target_load = max(cpu_load, load_now);
 
-       return max(rq->cpu_load[type-1], load_now);
+       if (running > 1 || (idle == NOT_IDLE && running))
+               target_load = target_load * rq->prio_bias / running;
+
+       return target_load;
+}
+
+static inline unsigned long target_load(int cpu, int type)
+{
+       return __target_load(cpu, type, NOT_IDLE);
 }
 
 /*
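For reference, the biased-load arithmetic used by __source_load()/__target_load() above works out as in the following stand-alone sketch (plain user-space C, illustrative only and not part of the patch; the MAX_PRIO and SCHED_LOAD_SCALE values, and the static_prio values for nice 0 and nice -10, are assumptions taken from the usual 2.6 definitions):

#include <stdio.h>

#define MAX_PRIO                140     /* assumed: 100 RT prios + 40 nice levels */
#define SCHED_LOAD_SCALE        128UL   /* assumed 2.6 value */

int main(void)
{
        /* two runnable tasks: one at nice 0 (static_prio 120),
         * one at nice -10 (static_prio 110) */
        unsigned long running = 2;
        unsigned long prio_bias = (MAX_PRIO - 120) + (MAX_PRIO - 110);
        unsigned long load_now = running * SCHED_LOAD_SCALE;

        /* busy (NOT_IDLE) rebalance with running > 1: bias the raw load */
        unsigned long biased = load_now * prio_bias / running;

        printf("raw load %lu, biased load %lu\n", load_now, biased);
        return 0;
}

With this scaling a queue full of heavily niced tasks reports a much smaller biased load than a queue of the same length running nice-0 or reniced-up tasks, which is how 'nice' ends up influencing SMP balancing decisions.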
@@ -968,8 +1068,11 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                int local_group;
                int i;
 
+               /* Skip over this group if it has no CPUs allowed */
+               if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+                       goto nextgroup;
+
                local_group = cpu_isset(this_cpu, group->cpumask);
-               /* XXX: put a cpus allowed check */
 
                /* Tally up the load of all CPUs in the group */
                avg_load = 0;
@@ -994,6 +1097,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                        min_load = avg_load;
                        idlest = group;
                }
+nextgroup:
                group = group->next;
        } while (group != sd->groups);
 
@@ -1005,13 +1109,18 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 /*
  * find_idlest_queue - find the idlest runqueue among the cpus in group.
  */
-static int find_idlest_cpu(struct sched_group *group, int this_cpu)
+static int
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
+       cpumask_t tmp;
        unsigned long load, min_load = ULONG_MAX;
        int idlest = -1;
        int i;
 
-       for_each_cpu_mask(i, group->cpumask) {
+       /* Traverse only the allowed CPUs */
+       cpus_and(tmp, group->cpumask, p->cpus_allowed);
+
+       for_each_cpu_mask(i, tmp) {
                load = source_load(i, 0);
 
                if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -1023,8 +1132,59 @@ static int find_idlest_cpu(struct sched_group *group, int this_cpu)
        return idlest;
 }
 
+/*
+ * sched_balance_self: balance the current task (running on cpu) in domains
+ * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
+ * SD_BALANCE_EXEC.
+ *
+ * Balance, i.e. select the least loaded group.
+ *
+ * Returns the target CPU number, or the same CPU if no balancing is needed.
+ *
+ * preempt must be disabled.
+ */
+static int sched_balance_self(int cpu, int flag)
+{
+       struct task_struct *t = current;
+       struct sched_domain *tmp, *sd = NULL;
 
-#endif
+       for_each_domain(cpu, tmp)
+               if (tmp->flags & flag)
+                       sd = tmp;
+
+       while (sd) {
+               cpumask_t span;
+               struct sched_group *group;
+               int new_cpu;
+               int weight;
+
+               span = sd->span;
+               group = find_idlest_group(sd, t, cpu);
+               if (!group)
+                       goto nextlevel;
+
+               new_cpu = find_idlest_cpu(group, t, cpu);
+               if (new_cpu == -1 || new_cpu == cpu)
+                       goto nextlevel;
+
+               /* Now try balancing at a lower domain level */
+               cpu = new_cpu;
+nextlevel:
+               sd = NULL;
+               weight = cpus_weight(span);
+               for_each_domain(cpu, tmp) {
+                       if (weight <= cpus_weight(tmp->span))
+                               break;
+                       if (tmp->flags & flag)
+                               sd = tmp;
+               }
+               /* while loop will break here if sd == NULL */
+       }
+
+       return cpu;
+}
+
+#endif /* CONFIG_SMP */
 
 /*
  * wake_idle() will wake a task on an idle cpu if task->cpu is
@@ -1078,7 +1238,7 @@ static inline int wake_idle(int cpu, task_t *p)
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(task_t * p, unsigned int state, int sync)
+static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 {
        int cpu, this_cpu, success = 0;
        unsigned long flags;
@@ -1203,6 +1363,16 @@ out_activate:
        }
 
        /*
+        * Tasks that have marked their sleep as noninteractive get
+        * woken up without updating their sleep average. (i.e. their
+        * sleep is handled in a priority-neutral manner, no priority
+        * boost and no penalty.)
+        */
+       if (old_state & TASK_NONINTERACTIVE)
+               __activate_task(p, rq);
+       else
+               activate_task(p, rq, cpu == this_cpu);
+       /*
         * Sync wakeups (i.e. those types of wakeups where the waker
         * has indicated that it will leave the CPU in short order)
         * don't trigger a preemption, if the woken up task will run on
@@ -1210,7 +1380,6 @@ out_activate:
         * the waker guarantees that the freshly woken up task is going
         * to be considered on this CPU.)
         */
-       activate_task(p, rq, cpu == this_cpu);
        if (!sync || cpu != this_cpu) {
                if (TASK_PREEMPTS_CURR(p, rq))
                        resched_task(rq->curr);
@@ -1225,7 +1394,7 @@ out:
        return success;
 }
 
-int fastcall wake_up_process(task_t * p)
+int fastcall wake_up_process(task_t *p)
 {
        return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
                                 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
@@ -1242,8 +1411,15 @@ int fastcall wake_up_state(task_t *p, unsigned int state)
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
  */
-void fastcall sched_fork(task_t *p)
+void fastcall sched_fork(task_t *p, int clone_flags)
 {
+       int cpu = get_cpu();
+
+#ifdef CONFIG_SMP
+       cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
+#endif
+       set_task_cpu(p, cpu);
+
        /*
         * We mark the process as running here, but have not actually
         * inserted it onto the runqueue yet. This guarantees that
@@ -1284,12 +1460,10 @@ void fastcall sched_fork(task_t *p)
                 * runqueue lock is not a problem.
                 */
                current->time_slice = 1;
-               preempt_disable();
                scheduler_tick();
-               local_irq_enable();
-               preempt_enable();
-       } else
-               local_irq_enable();
+       }
+       local_irq_enable();
+       put_cpu();
 }
 
 /*
@@ -1299,54 +1473,17 @@ void fastcall sched_fork(task_t *p)
  * that must be done for every newly created context, then puts the task
  * on the runqueue and wakes it.
  */
-void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
+void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
 {
        unsigned long flags;
        int this_cpu, cpu;
        runqueue_t *rq, *this_rq;
-#ifdef CONFIG_SMP
-       struct sched_domain *tmp, *sd = NULL;
-#endif
 
        rq = task_rq_lock(p, &flags);
        BUG_ON(p->state != TASK_RUNNING);
        this_cpu = smp_processor_id();
        cpu = task_cpu(p);
 
-#ifdef CONFIG_SMP
-       for_each_domain(cpu, tmp)
-               if (tmp->flags & SD_BALANCE_FORK)
-                       sd = tmp;
-
-       if (sd) {
-               int new_cpu;
-               struct sched_group *group;
-
-               schedstat_inc(sd, sbf_cnt);
-               cpu = task_cpu(p);
-               group = find_idlest_group(sd, p, cpu);
-               if (!group) {
-                       schedstat_inc(sd, sbf_balanced);
-                       goto no_forkbalance;
-               }
-
-               new_cpu = find_idlest_cpu(group, cpu);
-               if (new_cpu == -1 || new_cpu == cpu) {
-                       schedstat_inc(sd, sbf_balanced);
-                       goto no_forkbalance;
-               }
-
-               if (cpu_isset(new_cpu, p->cpus_allowed)) {
-                       schedstat_inc(sd, sbf_pushed);
-                       set_task_cpu(p, new_cpu);
-                       task_rq_unlock(rq, &flags);
-                       rq = task_rq_lock(p, &flags);
-                       cpu = task_cpu(p);
-               }
-       }
-
-no_forkbalance:
-#endif
        /*
         * We decrease the sleep average of forking parents
         * and children as well, to keep max-interactive tasks
@@ -1372,7 +1509,7 @@ no_forkbalance:
                                list_add_tail(&p->run_list, &current->run_list);
                                p->array = current->array;
                                p->array->nr_active++;
-                               rq->nr_running++;
+                               inc_nr_running(p, rq);
                        }
                        set_need_resched();
                } else
@@ -1419,7 +1556,7 @@ no_forkbalance:
  * artificially, because any timeslice recovered here
  * was given away by the parent in the first place.)
  */
-void fastcall sched_exit(task_t * p)
+void fastcall sched_exit(task_t *p)
 {
        unsigned long flags;
        runqueue_t *rq;
@@ -1429,7 +1566,7 @@ void fastcall sched_exit(task_t * p)
         * the sleep_avg of the parent as well.
         */
        rq = task_rq_lock(p->parent, &flags);
-       if (p->first_time_slice) {
+       if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
                p->parent->time_slice += p->time_slice;
                if (unlikely(p->parent->time_slice > task_timeslice(p)))
                        p->parent->time_slice = task_timeslice(p);
@@ -1461,6 +1598,7 @@ static inline void prepare_task_switch(runqueue_t *rq, task_t *next)
 
 /**
  * finish_task_switch - clean up after a task-switch
+ * @rq: runqueue associated with task-switch
  * @prev: the thread we just switched away from.
  *
  * finish_task_switch must be called after the context switch, paired
@@ -1695,42 +1833,16 @@ out:
 }
 
 /*
- * sched_exec(): find the highest-level, exec-balance-capable
- * domain and try to migrate the task to the least loaded CPU.
- *
- * execve() is a valuable balancing opportunity, because at this point
- * the task has the smallest effective memory and cache footprint.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
  */
 void sched_exec(void)
 {
-       struct sched_domain *tmp, *sd = NULL;
        int new_cpu, this_cpu = get_cpu();
-
-       for_each_domain(this_cpu, tmp)
-               if (tmp->flags & SD_BALANCE_EXEC)
-                       sd = tmp;
-
-       if (sd) {
-               struct sched_group *group;
-               schedstat_inc(sd, sbe_cnt);
-               group = find_idlest_group(sd, current, this_cpu);
-               if (!group) {
-                       schedstat_inc(sd, sbe_balanced);
-                       goto out;
-               }
-               new_cpu = find_idlest_cpu(group, this_cpu);
-               if (new_cpu == -1 || new_cpu == this_cpu) {
-                       schedstat_inc(sd, sbe_balanced);
-                       goto out;
-               }
-
-               schedstat_inc(sd, sbe_pushed);
-               put_cpu();
-               sched_migrate_task(current, new_cpu);
-               return;
-       }
-out:
+       new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
        put_cpu();
+       if (new_cpu != this_cpu)
+               sched_migrate_task(current, new_cpu);
 }
 
 /*
@@ -1742,9 +1854,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
               runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
 {
        dequeue_task(p, src_array);
-       src_rq->nr_running--;
+       dec_nr_running(p, src_rq);
        set_task_cpu(p, this_cpu);
-       this_rq->nr_running++;
+       inc_nr_running(p, this_rq);
        enqueue_task(p, this_array);
        p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
                                + this_rq->timestamp_last_tick;
@@ -1761,7 +1873,8 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
  */
 static inline
 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
-            struct sched_domain *sd, enum idle_type idle, int *all_pinned)
+                    struct sched_domain *sd, enum idle_type idle,
+                    int *all_pinned)
 {
        /*
         * We do not migrate tasks that are:
@@ -1891,10 +2004,11 @@ out:
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
-                  unsigned long *imbalance, enum idle_type idle)
+                  unsigned long *imbalance, enum idle_type idle, int *sd_idle)
 {
        struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
        unsigned long max_load, avg_load, total_load, this_load, total_pwr;
+       unsigned long max_pull;
        int load_idx;
 
        max_load = this_load = total_load = total_pwr = 0;
@@ -1916,11 +2030,14 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                avg_load = 0;
 
                for_each_cpu_mask(i, group->cpumask) {
+                       if (*sd_idle && !idle_cpu(i))
+                               *sd_idle = 0;
+
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
-                               load = target_load(i, load_idx);
+                               load = __target_load(i, load_idx, idle);
                        else
-                               load = source_load(i, load_idx);
+                               load = __source_load(i, load_idx, idle);
 
                        avg_load += load;
                }
@@ -1941,7 +2058,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                group = group->next;
        } while (group != sd->groups);
 
-       if (!busiest || this_load >= max_load)
+       if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE)
                goto out_balanced;
 
        avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
@@ -1961,8 +2078,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         * by pulling tasks to us.  Be careful of negative numbers as they'll
         * appear as very large values with unsigned longs.
         */
+
+       /* Don't want to pull so many tasks that a group would go idle */
+       max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE);
+
        /* How much load to actually move to equalise the imbalance */
-       *imbalance = min((max_load - avg_load) * busiest->cpu_power,
+       *imbalance = min(max_pull * busiest->cpu_power,
                                (avg_load - this_load) * this->cpu_power)
                        / SCHED_LOAD_SCALE;
 
@@ -2021,14 +2142,15 @@ out_balanced:
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
-static runqueue_t *find_busiest_queue(struct sched_group *group)
+static runqueue_t *find_busiest_queue(struct sched_group *group,
+       enum idle_type idle)
 {
        unsigned long load, max_load = 0;
        runqueue_t *busiest = NULL;
        int i;
 
        for_each_cpu_mask(i, group->cpumask) {
-               load = source_load(i, 0);
+               load = __source_load(i, 0, idle);
 
                if (load > max_load) {
                        max_load = load;
@@ -2040,6 +2162,12 @@ static runqueue_t *find_busiest_queue(struct sched_group *group)
 }
 
 /*
+ * Max backoff if we encounter pinned tasks. The value is pretty arbitrary;
+ * it just needs to be large enough.
+ */
+#define MAX_PINNED_INTERVAL    512
+
+/*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  *
@@ -2051,19 +2179,22 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
        struct sched_group *group;
        runqueue_t *busiest;
        unsigned long imbalance;
-       int nr_moved, all_pinned;
+       int nr_moved, all_pinned = 0;
        int active_balance = 0;
+       int sd_idle = 0;
+
+       if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER)
+               sd_idle = 1;
 
-       spin_lock(&this_rq->lock);
        schedstat_inc(sd, lb_cnt[idle]);
 
-       group = find_busiest_group(sd, this_cpu, &imbalance, idle);
+       group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle);
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[idle]);
                goto out_balanced;
        }
 
-       busiest = find_busiest_queue(group);
+       busiest = find_busiest_queue(group, idle);
        if (!busiest) {
                schedstat_inc(sd, lb_nobusyq[idle]);
                goto out_balanced;
@@ -2081,19 +2212,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
                 * still unbalanced. nr_moved simply stays zero, so it is
                 * correctly treated as an imbalance.
                 */
-               double_lock_balance(this_rq, busiest);
+               double_rq_lock(this_rq, busiest);
                nr_moved = move_tasks(this_rq, this_cpu, busiest,
-                                               imbalance, sd, idle,
-                                               &all_pinned);
-               spin_unlock(&busiest->lock);
+                                       imbalance, sd, idle, &all_pinned);
+               double_rq_unlock(this_rq, busiest);
 
                /* All tasks on this runqueue were pinned by CPU affinity */
                if (unlikely(all_pinned))
                        goto out_balanced;
        }
 
-       spin_unlock(&this_rq->lock);
-
        if (!nr_moved) {
                schedstat_inc(sd, lb_failed[idle]);
                sd->nr_balance_failed++;
@@ -2101,6 +2229,16 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
                if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
                        spin_lock(&busiest->lock);
+
+                       /* don't kick the migration_thread if the curr
+                        * task on the busiest cpu can't be moved to this_cpu
+                        */
+                       if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+                               spin_unlock(&busiest->lock);
+                               all_pinned = 1;
+                               goto out_one_pinned;
+                       }
+
                        if (!busiest->active_balance) {
                                busiest->active_balance = 1;
                                busiest->push_cpu = this_cpu;
@@ -2133,18 +2271,23 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
                        sd->balance_interval *= 2;
        }
 
+       if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+               return -1;
        return nr_moved;
 
 out_balanced:
-       spin_unlock(&this_rq->lock);
-
        schedstat_inc(sd, lb_balanced[idle]);
 
        sd->nr_balance_failed = 0;
+
+out_one_pinned:
        /* tune up the balancing interval */
-       if (sd->balance_interval < sd->max_interval)
+       if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
+                       (sd->balance_interval < sd->max_interval))
                sd->balance_interval *= 2;
 
+       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+               return -1;
        return 0;
 }
 
@@ -2162,15 +2305,19 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
        runqueue_t *busiest = NULL;
        unsigned long imbalance;
        int nr_moved = 0;
+       int sd_idle = 0;
+
+       if (sd->flags & SD_SHARE_CPUPOWER)
+               sd_idle = 1;
 
        schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
-       group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
+       group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle);
        if (!group) {
                schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
                goto out_balanced;
        }
 
-       busiest = find_busiest_queue(group);
+       busiest = find_busiest_queue(group, NEWLY_IDLE);
        if (!busiest) {
                schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
                goto out_balanced;
@@ -2178,22 +2325,30 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 
        BUG_ON(busiest == this_rq);
 
-       /* Attempt to move tasks */
-       double_lock_balance(this_rq, busiest);
-
        schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-       nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+       nr_moved = 0;
+       if (busiest->nr_running > 1) {
+               /* Attempt to move tasks */
+               double_lock_balance(this_rq, busiest);
+               nr_moved = move_tasks(this_rq, this_cpu, busiest,
                                        imbalance, sd, NEWLY_IDLE, NULL);
-       if (!nr_moved)
+               spin_unlock(&busiest->lock);
+       }
+
+       if (!nr_moved) {
                schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
-       else
+               if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+                       return -1;
+       } else
                sd->nr_balance_failed = 0;
 
-       spin_unlock(&busiest->lock);
        return nr_moved;
 
 out_balanced:
        schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+               return -1;
        sd->nr_balance_failed = 0;
        return 0;
 }
@@ -2318,7 +2473,11 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
 
                if (j - sd->last_balance >= interval) {
                        if (load_balance(this_cpu, this_rq, sd, idle)) {
-                               /* We've pulled tasks over so no longer idle */
+                               /*
+                                * We've pulled tasks over so either we're no
+                                * longer idle, or one of our SMT siblings is
+                                * not idle.
+                                */
                                idle = NOT_IDLE;
                        }
                        sd->last_balance += interval;
@@ -2451,8 +2610,6 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
                cpustat->idle = cputime64_add(cpustat->idle, tmp);
        /* Account for system time used */
        acct_update_integrals(p);
-       /* Update rss highwater mark */
-       update_mem_hiwater(p);
 }
 
 /*
@@ -2577,6 +2734,13 @@ out:
 }
 
 #ifdef CONFIG_SCHED_SMT
+static inline void wakeup_busy_runqueue(runqueue_t *rq)
+{
+       /* If an SMT runqueue is sleeping due to priority reasons wake it up */
+       if (rq->curr == rq->idle && rq->nr_running)
+               resched_task(rq->idle);
+}
+
 static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
        struct sched_domain *tmp, *sd = NULL;
@@ -2610,12 +2774,7 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
        for_each_cpu_mask(i, sibling_map) {
                runqueue_t *smt_rq = cpu_rq(i);
 
-               /*
-                * If an SMT sibling task is sleeping due to priority
-                * reasons wake it up now.
-                */
-               if (smt_rq->curr == smt_rq->idle && smt_rq->nr_running)
-                       resched_task(smt_rq->idle);
+               wakeup_busy_runqueue(smt_rq);
        }
 
        for_each_cpu_mask(i, sibling_map)
@@ -2626,6 +2785,16 @@ static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
         */
 }
 
+/*
+ * Number of 'lost' timeslices this task won't be able to fully
+ * utilize if another task runs on a sibling. This models the
+ * slowdown effect of other tasks running on siblings:
+ */
+static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
+{
+       return p->time_slice * (100 - sd->per_cpu_gain) / 100;
+}
+
 static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
        struct sched_domain *tmp, *sd = NULL;
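To make the smt_slice() formula above concrete, here is a minimal stand-alone sketch (illustrative only, not part of the patch; the 25% per_cpu_gain figure is an assumption matching the usual SMT sibling domain default, and the 100-tick timeslice is made up):

#include <stdio.h>

static unsigned long smt_slice(unsigned long time_slice, int per_cpu_gain)
{
        return time_slice * (100 - per_cpu_gain) / 100;
}

int main(void)
{
        unsigned long slice = 100;      /* timeslice, in ticks (made up) */
        int per_cpu_gain = 25;          /* assumed SMT sibling gain */

        /* 75 of the 100 ticks are treated as lost to sibling contention */
        printf("smt_slice = %lu ticks\n", smt_slice(slice, per_cpu_gain));
        return 0;
}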
@@ -2669,6 +2838,10 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
                runqueue_t *smt_rq = cpu_rq(i);
                task_t *smt_curr = smt_rq->curr;
 
+               /* Kernel threads do not participate in dependent sleeping */
+               if (!p->mm || !smt_curr->mm || rt_task(p))
+                       goto check_smt_task;
+
                /*
                 * If a user task with lower static priority than the
                 * running task on the SMT sibling is trying to schedule,
@@ -2677,21 +2850,45 @@ static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
                 * task from using an unfair proportion of the
                 * physical cpu's resources. -ck
                 */
-               if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) >
-                       task_timeslice(p) || rt_task(smt_curr)) &&
-                       p->mm && smt_curr->mm && !rt_task(p))
-                               ret = 1;
+               if (rt_task(smt_curr)) {
+                       /*
+                        * With real time tasks we run non-rt tasks only
+                        * per_cpu_gain% of the time.
+                        */
+                       if ((jiffies % DEF_TIMESLICE) >
+                               (sd->per_cpu_gain * DEF_TIMESLICE / 100))
+                                       ret = 1;
+               } else
+                       if (smt_curr->static_prio < p->static_prio &&
+                               !TASK_PREEMPTS_CURR(p, smt_rq) &&
+                               smt_slice(smt_curr, sd) > task_timeslice(p))
+                                       ret = 1;
+
+check_smt_task:
+               if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
+                       rt_task(smt_curr))
+                               continue;
+               if (!p->mm) {
+                       wakeup_busy_runqueue(smt_rq);
+                       continue;
+               }
 
                /*
-                * Reschedule a lower priority task on the SMT sibling,
-                * or wake it up if it has been put to sleep for priority
-                * reasons.
+                * Reschedule a lower priority task on the SMT sibling so
+                * that it is put to sleep, or wake it up if it has been put
+                * to sleep for priority reasons, to see if it should run now.
                 */
-               if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
-                       task_timeslice(smt_curr) || rt_task(p)) &&
-                       smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
-                       (smt_curr == smt_rq->idle && smt_rq->nr_running))
-                               resched_task(smt_curr);
+               if (rt_task(p)) {
+                       if ((jiffies % DEF_TIMESLICE) >
+                               (sd->per_cpu_gain * DEF_TIMESLICE / 100))
+                                       resched_task(smt_curr);
+               } else {
+                       if (TASK_PREEMPTS_CURR(p, smt_rq) &&
+                               smt_slice(p, sd) > task_timeslice(smt_curr))
+                                       resched_task(smt_curr);
+                       else
+                               wakeup_busy_runqueue(smt_rq);
+               }
        }
 out_unlock:
        for_each_cpu_mask(i, sibling_map)
@@ -2753,7 +2950,7 @@ asmlinkage void __sched schedule(void)
        struct list_head *queue;
        unsigned long long now;
        unsigned long run_time;
-       int cpu, idx;
+       int cpu, idx, new_prio;
 
        /*
         * Test if we are atomic.  Since do_exit() needs to call into
@@ -2875,15 +3072,21 @@ go_idle:
                        delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
 
                array = next->array;
-               dequeue_task(next, array);
-               recalc_task_prio(next, next->timestamp + delta);
-               enqueue_task(next, array);
+               new_prio = recalc_task_prio(next, next->timestamp + delta);
+
+               if (unlikely(next->prio != new_prio)) {
+                       dequeue_task(next, array);
+                       next->prio = new_prio;
+                       enqueue_task(next, array);
+               } else
+                       requeue_task(next, array);
        }
        next->activated = 0;
 switch_tasks:
        if (next == rq->idle)
                schedstat_inc(rq, sched_goidle);
        prefetch(next);
+       prefetch_stack(next);
        clear_tsk_need_resched(prev);
        rcu_qsctr_inc(task_cpu(prev));
 
@@ -3011,7 +3214,8 @@ need_resched:
 
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
+int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+                         void *key)
 {
        task_t *p = curr->private;
        return try_to_wake_up(p, mode, sync);
@@ -3053,7 +3257,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * @key: is directly passed to the wakeup function
  */
 void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
-                               int nr_exclusive, void *key)
+                       int nr_exclusive, void *key)
 {
        unsigned long flags;
 
@@ -3085,7 +3289,8 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall
+__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
        unsigned long flags;
        int sync = 1;
@@ -3276,7 +3481,8 @@ void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
 
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall __sched
+interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
        SLEEP_ON_VAR
 
@@ -3344,8 +3550,10 @@ void set_user_nice(task_t *p, long nice)
                goto out_unlock;
        }
        array = p->array;
-       if (array)
+       if (array) {
                dequeue_task(p, array);
+               dec_prio_bias(rq, p->static_prio);
+       }
 
        old_prio = p->prio;
        new_prio = NICE_TO_PRIO(nice);
@@ -3355,6 +3563,7 @@ void set_user_nice(task_t *p, long nice)
 
        if (array) {
                enqueue_task(p, array);
+               inc_prio_bias(rq, p->static_prio);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
@@ -3375,8 +3584,8 @@ EXPORT_SYMBOL(set_user_nice);
  */
 int can_nice(const task_t *p, const int nice)
 {
-       /* convert nice value [19,-20] to rlimit style value [0,39] */
-       int nice_rlim = 19 - nice;
+       /* convert nice value [19,-20] to rlimit style value [1,40] */
+       int nice_rlim = 20 - nice;
        return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
                capable(CAP_SYS_NICE));
 }
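A quick stand-alone check of the corrected nice-to-rlimit conversion above (illustrative only, not part of the patch): nice 19 maps to 1 and nice -20 maps to 40, matching the new [1,40] comment.

#include <stdio.h>

int main(void)
{
        int nice;

        /* nice values [19..-20] map to RLIMIT_NICE style values [1..40] */
        for (nice = 19; nice >= -20; nice--)
                printf("nice %3d -> rlimit value %2d\n", nice, 20 - nice);
        return 0;
}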
@@ -3445,15 +3654,7 @@ int task_nice(const task_t *p)
 {
        return TASK_NICE(p);
 }
-
-/*
- * The only users of task_nice are binfmt_elf and binfmt_elf32.
- * binfmt_elf is no longer modular, but binfmt_elf32 still is.
- * Therefore, task_nice is needed if there is a compat_mode.
- */
-#ifdef CONFIG_COMPAT
 EXPORT_SYMBOL_GPL(task_nice);
-#endif
 
 /**
  * idle_cpu - is a given cpu idle currently?
@@ -3464,8 +3665,6 @@ int idle_cpu(int cpu)
        return cpu_curr(cpu) == cpu_rq(cpu)->idle;
 }
 
-EXPORT_SYMBOL_GPL(idle_cpu);
-
 /**
  * idle_task - return the idle task for a given cpu.
  * @cpu: the processor in question.
@@ -3491,7 +3690,7 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
        p->policy = policy;
        p->rt_priority = prio;
        if (policy != SCHED_NORMAL)
-               p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
+               p->prio = MAX_RT_PRIO-1 - p->rt_priority;
        else
                p->prio = p->static_prio;
 }
@@ -3503,7 +3702,8 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param)
+int sched_setscheduler(struct task_struct *p, int policy,
+                      struct sched_param *param)
 {
        int retval;
        int oldprio, oldpolicy = -1;
@@ -3523,18 +3723,31 @@ recheck:
         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
         */
        if (param->sched_priority < 0 ||
-           param->sched_priority > MAX_USER_RT_PRIO-1)
+           (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
+           (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
                return -EINVAL;
        if ((policy == SCHED_NORMAL) != (param->sched_priority == 0))
                return -EINVAL;
 
-       if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
-           param->sched_priority > p->signal->rlim[RLIMIT_RTPRIO].rlim_cur &&
-           !capable(CAP_SYS_NICE))
-               return -EPERM;
-       if ((current->euid != p->euid) && (current->euid != p->uid) &&
-           !capable(CAP_SYS_NICE))
-               return -EPERM;
+       /*
+        * Allow unprivileged RT tasks to decrease priority:
+        */
+       if (!capable(CAP_SYS_NICE)) {
+               /* can't change policy */
+               if (policy != p->policy &&
+                       !p->signal->rlim[RLIMIT_RTPRIO].rlim_cur)
+                       return -EPERM;
+               /* can't increase priority */
+               if (policy != SCHED_NORMAL &&
+                   param->sched_priority > p->rt_priority &&
+                   param->sched_priority >
+                               p->signal->rlim[RLIMIT_RTPRIO].rlim_cur)
+                       return -EPERM;
+               /* can't change other user's priorities */
+               if ((current->euid != p->euid) &&
+                   (current->euid != p->uid))
+                       return -EPERM;
+       }
 
        retval = security_task_setscheduler(p, policy, param);
        if (retval)
@@ -3573,7 +3786,8 @@ recheck:
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);
 
-static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+static int
+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
        int retval;
        struct sched_param lparam;
@@ -3840,7 +4054,7 @@ asmlinkage long sys_sched_yield(void)
        if (rt_task(current))
                target = rq->active;
 
-       if (current->array->nr_active == 1) {
+       if (array->nr_active == 1) {
                schedstat_inc(rq, yld_act_empty);
                if (!rq->expired->nr_active)
                        schedstat_inc(rq, yld_both_empty);
@@ -3871,6 +4085,13 @@ asmlinkage long sys_sched_yield(void)
 
 static inline void __cond_resched(void)
 {
+       /*
+        * The BKS might be reacquired before we have dropped
+        * PREEMPT_ACTIVE, which could trigger a second
+        * cond_resched() call.
+        */
+       if (unlikely(preempt_count()))
+               return;
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                schedule();
@@ -3897,7 +4118,7 @@ EXPORT_SYMBOL(cond_resched);
  * operations here to prevent schedule() from being called twice (once via
  * spin_unlock(), once by hand).
  */
-int cond_resched_lock(spinlock_t * lock)
+int cond_resched_lock(spinlock_t *lock)
 {
        int ret = 0;
 
@@ -4080,7 +4301,7 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
        return list_entry(p->sibling.next,struct task_struct,sibling);
 }
 
-static void show_task(task_t * p)
+static void show_task(task_t *p)
 {
        task_t *relative;
        unsigned state;
@@ -4106,7 +4327,7 @@ static void show_task(task_t * p)
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
        {
-               unsigned long * n = (unsigned long *) (p->thread_info+1);
+               unsigned long *n = (unsigned long *) (p->thread_info+1);
                while (!*n)
                        n++;
                free = (unsigned long) n - (unsigned long)(p->thread_info+1);
@@ -4160,6 +4381,14 @@ void show_state(void)
        read_unlock(&tasklist_lock);
 }
 
+/**
+ * init_idle - set up an idle thread for a given CPU
+ * @idle: task in question
+ * @cpu: cpu the idle task belongs to
+ *
+ * NOTE: this function does not set the idle thread's NEED_RESCHED
+ * flag, to make booting more robust.
+ */
 void __devinit init_idle(task_t *idle, int cpu)
 {
        runqueue_t *rq = cpu_rq(cpu);
@@ -4177,7 +4406,6 @@ void __devinit init_idle(task_t *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
        idle->oncpu = 1;
 #endif
-       set_tsk_need_resched(idle);
        spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
@@ -4308,7 +4536,7 @@ out:
  * thread migration by bumping thread off CPU then 'pushing' onto
  * another runqueue.
  */
-static int migration_thread(void * data)
+static int migration_thread(void *data)
 {
        runqueue_t *rq;
        int cpu = (long)data;
@@ -4321,8 +4549,7 @@ static int migration_thread(void * data)
                struct list_head *head;
                migration_req_t *req;
 
-               if (current->flags & PF_FREEZE)
-                       refrigerator(PF_FREEZE);
+               try_to_freeze();
 
                spin_lock_irq(&rq->lock);
 
@@ -4347,17 +4574,9 @@ static int migration_thread(void * data)
                req = list_entry(head->next, migration_req_t, list);
                list_del_init(head->next);
 
-               if (req->type == REQ_MOVE_TASK) {
-                       spin_unlock(&rq->lock);
-                       __migrate_task(req->task, cpu, req->dest_cpu);
-                       local_irq_enable();
-               } else if (req->type == REQ_SET_DOMAIN) {
-                       rq->sd = req->sd;
-                       spin_unlock_irq(&rq->lock);
-               } else {
-                       spin_unlock_irq(&rq->lock);
-                       WARN_ON(1);
-               }
+               spin_unlock(&rq->lock);
+               __migrate_task(req->task, cpu, req->dest_cpu);
+               local_irq_enable();
 
                complete(&req->done);
        }
@@ -4561,7 +4780,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
                /* Unbind it from offline cpu so it can run.  Fall thru. */
-               kthread_bind(cpu_rq(cpu)->migration_thread,smp_processor_id());
+               kthread_bind(cpu_rq(cpu)->migration_thread,
+                            any_online_cpu(cpu_online_map));
                kthread_stop(cpu_rq(cpu)->migration_thread);
                cpu_rq(cpu)->migration_thread = NULL;
                break;
@@ -4588,7 +4808,6 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
                        migration_req_t *req;
                        req = list_entry(rq->migration_queue.next,
                                         migration_req_t, list);
-                       BUG_ON(req->type != REQ_MOVE_TASK);
                        list_del_init(&req->list);
                        complete(&req->done);
                }
@@ -4619,7 +4838,7 @@ int __init migration_init(void)
 #endif
 
 #ifdef CONFIG_SMP
-#define SCHED_DOMAIN_DEBUG
+#undef SCHED_DOMAIN_DEBUG
 #ifdef SCHED_DOMAIN_DEBUG
 static void sched_domain_debug(struct sched_domain *sd, int cpu)
 {
@@ -4712,7 +4931,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 #define sched_domain_debug(sd, cpu) {}
 #endif
 
-static int __devinit sd_degenerate(struct sched_domain *sd)
+static int sd_degenerate(struct sched_domain *sd)
 {
        if (cpus_weight(sd->span) == 1)
                return 1;
@@ -4735,7 +4954,7 @@ static int __devinit sd_degenerate(struct sched_domain *sd)
        return 1;
 }
 
-static int __devinit sd_parent_degenerate(struct sched_domain *sd,
+static int sd_parent_degenerate(struct sched_domain *sd,
                                                struct sched_domain *parent)
 {
        unsigned long cflags = sd->flags, pflags = parent->flags;
@@ -4767,12 +4986,9 @@ static int __devinit sd_parent_degenerate(struct sched_domain *sd,
  * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
  * hold the hotplug lock.
  */
-void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu)
+static void cpu_attach_domain(struct sched_domain *sd, int cpu)
 {
-       migration_req_t req;
-       unsigned long flags;
        runqueue_t *rq = cpu_rq(cpu);
-       int local = 1;
        struct sched_domain *tmp;
 
        /* Remove the sched domains which do not contribute to scheduling. */
@@ -4789,28 +5005,11 @@ void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu)
 
        sched_domain_debug(sd, cpu);
 
-       spin_lock_irqsave(&rq->lock, flags);
-
-       if (cpu == smp_processor_id() || !cpu_online(cpu)) {
-               rq->sd = sd;
-       } else {
-               init_completion(&req.done);
-               req.type = REQ_SET_DOMAIN;
-               req.sd = sd;
-               list_add(&req.list, &rq->migration_queue);
-               local = 0;
-       }
-
-       spin_unlock_irqrestore(&rq->lock, flags);
-
-       if (!local) {
-               wake_up_process(rq->migration_thread);
-               wait_for_completion(&req.done);
-       }
+       rcu_assign_pointer(rq->sd, sd);
 }
 
 /* cpus with isolated domains */
-cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE;
+static cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE;
 
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
@@ -4838,8 +5037,8 @@ __setup ("isolcpus=", isolated_cpu_setup);
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
  */
-void __devinit init_sched_build_groups(struct sched_group groups[],
-                       cpumask_t span, int (*group_fn)(int cpu))
+static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
+                                   int (*group_fn)(int cpu))
 {
        struct sched_group *first = NULL, *last = NULL;
        cpumask_t covered = CPU_MASK_NONE;
@@ -4872,15 +5071,89 @@ void __devinit init_sched_build_groups(struct sched_group groups[],
        last->next = first;
 }
 
+#define SD_NODES_PER_DOMAIN 16
 
-#ifdef ARCH_HAS_SCHED_DOMAIN
-extern void __devinit arch_init_sched_domains(void);
-extern void __devinit arch_destroy_sched_domains(void);
-#else
+#ifdef CONFIG_NUMA
+/**
+ * find_next_best_node - find the next node to include in a sched_domain
+ * @node: node whose sched_domain we're building
+ * @used_nodes: nodes already in the sched_domain
+ *
+ * Find the next node to include in a given scheduling domain.  Simply
+ * finds the closest node not already in the @used_nodes map.
+ *
+ * Should use nodemask_t.
+ */
+static int find_next_best_node(int node, unsigned long *used_nodes)
+{
+       int i, n, val, min_val, best_node = 0;
+
+       min_val = INT_MAX;
+
+       for (i = 0; i < MAX_NUMNODES; i++) {
+               /* Start at @node */
+               n = (node + i) % MAX_NUMNODES;
+
+               if (!nr_cpus_node(n))
+                       continue;
+
+               /* Skip already used nodes */
+               if (test_bit(n, used_nodes))
+                       continue;
+
+               /* Simple min distance search */
+               val = node_distance(node, n);
+
+               if (val < min_val) {
+                       min_val = val;
+                       best_node = n;
+               }
+       }
+
+       set_bit(best_node, used_nodes);
+       return best_node;
+}
+
+/**
+ * sched_domain_node_span - get a cpumask for a node's sched_domain
+ * @node: node whose cpumask we're constructing
+ * @size: number of nodes to include in this span
+ *
+ * Given a node, construct a good cpumask for its sched_domain to span.  It
+ * should be one that prevents unnecessary balancing, but also spreads tasks
+ * out optimally.
+ */
+static cpumask_t sched_domain_node_span(int node)
+{
+       int i;
+       cpumask_t span, nodemask;
+       DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
+
+       cpus_clear(span);
+       bitmap_zero(used_nodes, MAX_NUMNODES);
+
+       nodemask = node_to_cpumask(node);
+       cpus_or(span, span, nodemask);
+       set_bit(node, used_nodes);
+
+       for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
+               int next_node = find_next_best_node(node, used_nodes);
+               nodemask = node_to_cpumask(next_node);
+               cpus_or(span, span, nodemask);
+       }
+
+       return span;
+}
+#endif
+
+/*
+ * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
+ * can switch it on easily if needed.
+ */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static struct sched_group sched_group_cpus[NR_CPUS];
-static int __devinit cpu_to_cpu_group(int cpu)
+static int cpu_to_cpu_group(int cpu)
 {
        return cpu;
 }
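The node-span construction added above (find_next_best_node/sched_domain_node_span) greedily pulls in the nearest not-yet-used nodes. A stand-alone sketch of that selection loop follows (illustrative only, not part of the patch; the 4-node distance table and node count are made up):

#include <stdio.h>
#include <limits.h>

#define NNODES 4

/* made-up symmetric node distance table, diagonal = local distance */
static const int distance[NNODES][NNODES] = {
        { 10, 20, 40, 40 },
        { 20, 10, 40, 40 },
        { 40, 40, 10, 20 },
        { 40, 40, 20, 10 },
};

static int next_best_node(int node, int used[NNODES])
{
        int i, best = -1, min_val = INT_MAX;

        for (i = 0; i < NNODES; i++) {
                int n = (node + i) % NNODES;    /* start the search at @node */

                if (used[n])                    /* skip already used nodes */
                        continue;
                if (distance[node][n] < min_val) {      /* simple min distance */
                        min_val = distance[node][n];
                        best = n;
                }
        }
        if (best >= 0)
                used[best] = 1;
        return best;
}

int main(void)
{
        int used[NNODES] = { 0 }, node = 0, i;

        used[node] = 1;
        printf("span for node %d: %d", node, node);
        for (i = 1; i < NNODES; i++)
                printf(" %d", next_best_node(node, used));
        printf("\n");
        return 0;
}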
@@ -4888,7 +5161,7 @@ static int __devinit cpu_to_cpu_group(int cpu)
 
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static struct sched_group sched_group_phys[NR_CPUS];
-static int __devinit cpu_to_phys_group(int cpu)
+static int cpu_to_phys_group(int cpu)
 {
 #ifdef CONFIG_SCHED_SMT
        return first_cpu(cpu_sibling_map[cpu]);
@@ -4898,74 +5171,86 @@ static int __devinit cpu_to_phys_group(int cpu)
 }
 
 #ifdef CONFIG_NUMA
-
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
-static struct sched_group sched_group_nodes[MAX_NUMNODES];
-static int __devinit cpu_to_node_group(int cpu)
-{
-       return cpu_to_node(cpu);
-}
-#endif
-
-#if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA)
 /*
- * The domains setup code relies on siblings not spanning
- * multiple nodes. Make sure the architecture has a proper
- * siblings map:
+ * The init_sched_build_groups can't handle what we want to do with node
+ * groups, so roll our own. Now each node has its own list of groups which
+ * gets dynamically allocated.
  */
-static void check_sibling_maps(void)
-{
-       int i, j;
+static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
 
-       for_each_online_cpu(i) {
-               for_each_cpu_mask(j, cpu_sibling_map[i]) {
-                       if (cpu_to_node(i) != cpu_to_node(j)) {
-                               printk(KERN_INFO "warning: CPU %d siblings map "
-                                       "to different node - isolating "
-                                       "them.\n", i);
-                               cpu_sibling_map[i] = cpumask_of_cpu(i);
-                               break;
-                       }
-               }
-       }
+static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS];
+
+static int cpu_to_allnodes_group(int cpu)
+{
+       return cpu_to_node(cpu);
 }
 #endif
 
 /*
- * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus.
  */
-static void __devinit arch_init_sched_domains(void)
+void build_sched_domains(const cpumask_t *cpu_map)
 {
        int i;
-       cpumask_t cpu_default_map;
+#ifdef CONFIG_NUMA
+       struct sched_group **sched_group_nodes = NULL;
+       struct sched_group *sched_group_allnodes = NULL;
 
-#if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA)
-       check_sibling_maps();
-#endif
        /*
-        * Setup mask for cpus without special case scheduling requirements.
-        * For now this just excludes isolated cpus, but could be used to
-        * exclude other special cases in the future.
+        * Allocate the per-node list of sched groups
         */
-       cpus_complement(cpu_default_map, cpu_isolated_map);
-       cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
+       sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
+                                          GFP_ATOMIC);
+       if (!sched_group_nodes) {
+               printk(KERN_WARNING "Can not alloc sched group node list\n");
+               return;
+       }
+       sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
+#endif
 
        /*
-        * Set up domains. Isolated domains just stay on the NULL domain.
+        * Set up domains for cpus specified by the cpu_map.
         */
-       for_each_cpu_mask(i, cpu_default_map) {
+       for_each_cpu_mask(i, *cpu_map) {
                int group;
                struct sched_domain *sd = NULL, *p;
                cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
 
-               cpus_and(nodemask, nodemask, cpu_default_map);
+               cpus_and(nodemask, nodemask, *cpu_map);
 
 #ifdef CONFIG_NUMA
+               if (cpus_weight(*cpu_map)
+                               > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
+                       if (!sched_group_allnodes) {
+                               sched_group_allnodes
+                                       = kmalloc(sizeof(struct sched_group)
+                                                       * MAX_NUMNODES,
+                                                 GFP_KERNEL);
+                               if (!sched_group_allnodes) {
+                                       printk(KERN_WARNING
+                                       "Can not alloc allnodes sched group\n");
+                                       break;
+                               }
+                               sched_group_allnodes_bycpu[i]
+                                               = sched_group_allnodes;
+                       }
+                       sd = &per_cpu(allnodes_domains, i);
+                       *sd = SD_ALLNODES_INIT;
+                       sd->span = *cpu_map;
+                       group = cpu_to_allnodes_group(i);
+                       sd->groups = &sched_group_allnodes[group];
+                       p = sd;
+               } else
+                       p = NULL;
+
                sd = &per_cpu(node_domains, i);
-               group = cpu_to_node_group(i);
                *sd = SD_NODE_INIT;
-               sd->span = cpu_default_map;
-               sd->groups = &sched_group_nodes[group];
+               sd->span = sched_domain_node_span(cpu_to_node(i));
+               sd->parent = p;
+               cpus_and(sd->span, sd->span, *cpu_map);
 #endif
 
                p = sd;
@@ -4982,7 +5267,7 @@ static void __devinit arch_init_sched_domains(void)
                group = cpu_to_cpu_group(i);
                *sd = SD_SIBLING_INIT;
                sd->span = cpu_sibling_map[i];
-               cpus_and(sd->span, sd->span, cpu_default_map);
+               cpus_and(sd->span, sd->span, *cpu_map);
                sd->parent = p;
                sd->groups = &sched_group_cpus[group];
 #endif
@@ -4990,9 +5275,9 @@ static void __devinit arch_init_sched_domains(void)
 
 #ifdef CONFIG_SCHED_SMT
        /* Set up CPU (sibling) groups */
-       for_each_online_cpu(i) {
+       for_each_cpu_mask(i, *cpu_map) {
                cpumask_t this_sibling_map = cpu_sibling_map[i];
-               cpus_and(this_sibling_map, this_sibling_map, cpu_default_map);
+               cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
                if (i != first_cpu(this_sibling_map))
                        continue;
 
@@ -5005,7 +5290,7 @@ static void __devinit arch_init_sched_domains(void)
        for (i = 0; i < MAX_NUMNODES; i++) {
                cpumask_t nodemask = node_to_cpumask(i);
 
-               cpus_and(nodemask, nodemask, cpu_default_map);
+               cpus_and(nodemask, nodemask, *cpu_map);
                if (cpus_empty(nodemask))
                        continue;
 
@@ -5015,12 +5300,81 @@ static void __devinit arch_init_sched_domains(void)
 
 #ifdef CONFIG_NUMA
        /* Set up node groups */
-       init_sched_build_groups(sched_group_nodes, cpu_default_map,
-                                       &cpu_to_node_group);
+       if (sched_group_allnodes)
+               init_sched_build_groups(sched_group_allnodes, *cpu_map,
+                                       &cpu_to_allnodes_group);
+
+       for (i = 0; i < MAX_NUMNODES; i++) {
+               /* Set up node groups */
+               struct sched_group *sg, *prev;
+               cpumask_t nodemask = node_to_cpumask(i);
+               cpumask_t domainspan;
+               cpumask_t covered = CPU_MASK_NONE;
+               int j;
+
+               cpus_and(nodemask, nodemask, *cpu_map);
+               if (cpus_empty(nodemask)) {
+                       sched_group_nodes[i] = NULL;
+                       continue;
+               }
+
+               domainspan = sched_domain_node_span(i);
+               cpus_and(domainspan, domainspan, *cpu_map);
+
+               sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+               sched_group_nodes[i] = sg;
+               for_each_cpu_mask(j, nodemask) {
+                       struct sched_domain *sd;
+                       sd = &per_cpu(node_domains, j);
+                       sd->groups = sg;
+                       if (sd->groups == NULL) {
+                               /* Turn off balancing if we have no groups */
+                               sd->flags = 0;
+                       }
+               }
+               if (!sg) {
+                       printk(KERN_WARNING
+                       "Can not alloc domain group for node %d\n", i);
+                       continue;
+               }
+               sg->cpu_power = 0;
+               sg->cpumask = nodemask;
+               cpus_or(covered, covered, nodemask);
+               prev = sg;
+
+               for (j = 0; j < MAX_NUMNODES; j++) {
+                       cpumask_t tmp, notcovered;
+                       int n = (i + j) % MAX_NUMNODES;
+
+                       cpus_complement(notcovered, covered);
+                       cpus_and(tmp, notcovered, *cpu_map);
+                       cpus_and(tmp, tmp, domainspan);
+                       if (cpus_empty(tmp))
+                               break;
+
+                       nodemask = node_to_cpumask(n);
+                       cpus_and(tmp, tmp, nodemask);
+                       if (cpus_empty(tmp))
+                               continue;
+
+                       sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+                       if (!sg) {
+                               printk(KERN_WARNING
+                               "Can not alloc domain group for node %d\n", j);
+                               break;
+                       }
+                       sg->cpu_power = 0;
+                       sg->cpumask = tmp;
+                       cpus_or(covered, covered, tmp);
+                       prev->next = sg;
+                       prev = sg;
+               }
+               prev->next = sched_group_nodes[i];
+       }
 #endif
 
        /* Calculate CPU power for physical packages and nodes */
-       for_each_cpu_mask(i, cpu_default_map) {
+       for_each_cpu_mask(i, *cpu_map) {
                int power;
                struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
@@ -5035,16 +5389,48 @@ static void __devinit arch_init_sched_domains(void)
                sd->groups->cpu_power = power;
 
 #ifdef CONFIG_NUMA
-               if (i == first_cpu(sd->groups->cpumask)) {
-                       /* Only add "power" once for each physical package. */
-                       sd = &per_cpu(node_domains, i);
-                       sd->groups->cpu_power += power;
+               sd = &per_cpu(allnodes_domains, i);
+               if (sd->groups) {
+                       power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
+                               (cpus_weight(sd->groups->cpumask)-1) / 10;
+                       sd->groups->cpu_power = power;
                }
 #endif
        }
 
+#ifdef CONFIG_NUMA
+       for (i = 0; i < MAX_NUMNODES; i++) {
+               struct sched_group *sg = sched_group_nodes[i];
+               int j;
+
+               if (sg == NULL)
+                       continue;
+next_sg:
+               for_each_cpu_mask(j, sg->cpumask) {
+                       struct sched_domain *sd;
+                       int power;
+
+                       sd = &per_cpu(phys_domains, j);
+                       if (j != first_cpu(sd->groups->cpumask)) {
+                               /*
+                                * Only add "power" once for each
+                                * physical package.
+                                */
+                               continue;
+                       }
+                       power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
+                               (cpus_weight(sd->groups->cpumask)-1) / 10;
+
+                       sg->cpu_power += power;
+               }
+               sg = sg->next;
+               if (sg != sched_group_nodes[i])
+                       goto next_sg;
+       }
+#endif
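For illustration only (not part of the patch): the "goto next_sg" loop above
(and the matching one in arch_destroy_sched_domains() below) walks the per-node
group list as a circular singly linked list - the last group built points back
to the first.  A minimal userspace sketch of that build-and-walk pattern, with
the three-element ring made up for the example.

#include <stdio.h>
#include <stdlib.h>

/* Stripped-down stand-in for struct sched_group. */
struct ex_group {
        unsigned long cpu_power;
        struct ex_group *next;
};

int main(void)
{
        struct ex_group *head = NULL, *prev = NULL, *sg;
        int i;

        /* Build a three-group ring, the way the node-group setup does. */
        for (i = 0; i < 3; i++) {
                sg = calloc(1, sizeof(*sg));
                if (!sg)
                        return 1;
                sg->cpu_power = 128 * (i + 1);
                if (!head)
                        head = sg;
                else
                        prev->next = sg;
                prev = sg;
        }
        prev->next = head;              /* close the ring */

        /* Equivalent of the "goto next_sg" walk: visit each group once. */
        sg = head;
        do {
                printf("group power %lu\n", sg->cpu_power);
                sg = sg->next;
        } while (sg != head);

        return 0;
}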
+
        /* Attach the domains */
-       for_each_online_cpu(i) {
+       for_each_cpu_mask(i, *cpu_map) {
                struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
                sd = &per_cpu(cpu_domains, i);
@@ -5054,15 +5440,104 @@ static void __devinit arch_init_sched_domains(void)
                cpu_attach_domain(sd, i);
        }
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-static void __devinit arch_destroy_sched_domains(void)
+/*
+ * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
+ */
+static void arch_init_sched_domains(const cpumask_t *cpu_map)
 {
-       /* Do nothing: everything is statically allocated. */
+       cpumask_t cpu_default_map;
+
+       /*
+        * Setup mask for cpus without special case scheduling requirements.
+        * For now this just excludes isolated cpus, but could be used to
+        * exclude other special cases in the future.
+        */
+       cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
+
+       build_sched_domains(&cpu_default_map);
 }
+
+static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
+{
+#ifdef CONFIG_NUMA
+       int i;
+       int cpu;
+
+       for_each_cpu_mask(cpu, *cpu_map) {
+               struct sched_group *sched_group_allnodes
+                       = sched_group_allnodes_bycpu[cpu];
+               struct sched_group **sched_group_nodes
+                       = sched_group_nodes_bycpu[cpu];
+
+               if (sched_group_allnodes) {
+                       kfree(sched_group_allnodes);
+                       sched_group_allnodes_bycpu[cpu] = NULL;
+               }
+
+               if (!sched_group_nodes)
+                       continue;
+
+               for (i = 0; i < MAX_NUMNODES; i++) {
+                       cpumask_t nodemask = node_to_cpumask(i);
+                       struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+                       cpus_and(nodemask, nodemask, *cpu_map);
+                       if (cpus_empty(nodemask))
+                               continue;
+
+                       if (sg == NULL)
+                               continue;
+                       sg = sg->next;
+next_sg:
+                       oldsg = sg;
+                       sg = sg->next;
+                       kfree(oldsg);
+                       if (oldsg != sched_group_nodes[i])
+                               goto next_sg;
+               }
+               kfree(sched_group_nodes);
+               sched_group_nodes_bycpu[cpu] = NULL;
+       }
 #endif
+}
+
+/*
+ * Detach sched domains from a group of cpus specified in cpu_map.
+ * These cpus will now be attached to the NULL domain.
+ */
+static inline void detach_destroy_domains(const cpumask_t *cpu_map)
+{
+       int i;
+
+       for_each_cpu_mask(i, *cpu_map)
+               cpu_attach_domain(NULL, i);
+       synchronize_sched();
+       arch_destroy_sched_domains(cpu_map);
+}
+
+/*
+ * Partition sched domains as specified by the cpumasks below.
+ * This attaches all cpus from the cpumasks to the NULL domain,
+ * waits for an RCU quiescent period, recalculates the sched
+ * domain information and then attaches the cpus back to the
+ * correct sched domains.
+ * Call with the hotplug lock held.
+ */
+void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
+{
+       cpumask_t change_map;
+
+       cpus_and(*partition1, *partition1, cpu_online_map);
+       cpus_and(*partition2, *partition2, cpu_online_map);
+       cpus_or(change_map, *partition1, *partition2);
 
-#endif /* ARCH_HAS_SCHED_DOMAIN */
+       /* Detach sched domains from all of the affected cpus */
+       detach_destroy_domains(&change_map);
+       if (!cpus_empty(*partition1))
+               build_sched_domains(partition1);
+       if (!cpus_empty(*partition2))
+               build_sched_domains(partition2);
+}
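For illustration only (not part of the patch): a sketch of how a hypothetical
caller might carve the online cpus into two partitions and hand them to
partition_sched_domains().  The isolated_set mask and the helper name are
assumptions for the example; the locking requirement comes from the comment
above.

/* Hypothetical caller: everything in *isolated_set becomes its own partition. */
static void example_repartition(const cpumask_t *isolated_set)
{
        cpumask_t part1, part2;

        lock_cpu_hotplug();     /* partition_sched_domains() wants the hotplug lock */

        cpus_andnot(part1, cpu_online_map, *isolated_set);
        cpus_and(part2, *isolated_set, cpu_online_map);

        partition_sched_domains(&part1, &part2);

        unlock_cpu_hotplug();
}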
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
@@ -5074,14 +5549,10 @@ static void __devinit arch_destroy_sched_domains(void)
 static int update_sched_domains(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
 {
-       int i;
-
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_DOWN_PREPARE:
-               for_each_online_cpu(i)
-                       cpu_attach_domain(NULL, i);
-               arch_destroy_sched_domains();
+               detach_destroy_domains(&cpu_online_map);
                return NOTIFY_OK;
 
        case CPU_UP_CANCELED:
@@ -5097,7 +5568,7 @@ static int update_sched_domains(struct notifier_block *nfb,
        }
 
        /* The hotplug lock is already held by cpu_up/cpu_down */
-       arch_init_sched_domains();
+       arch_init_sched_domains(&cpu_online_map);
 
        return NOTIFY_OK;
 }
@@ -5106,7 +5577,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 void __init sched_init_smp(void)
 {
        lock_cpu_hotplug();
-       arch_init_sched_domains();
+       arch_init_sched_domains(&cpu_online_map);
        unlock_cpu_hotplug();
        /* XXX: Theoretical race here - CPU may be hotplugged now */
        hotcpu_notifier(update_sched_domains, 0);
@@ -5230,3 +5701,47 @@ void normalize_rt_tasks(void)
 }
 
 #endif /* CONFIG_MAGIC_SYSRQ */
+
+#ifdef CONFIG_IA64
+/*
+ * These functions are only useful for the IA64 MCA handling.
+ *
+ * They can only be called when the whole system has been
+ * stopped - every CPU needs to be quiescent, and no scheduling
+ * activity can take place. Using them for anything else would
+ * be a serious bug, and as a result, they aren't even visible
+ * under any other configuration.
+ */
+
+/**
+ * curr_task - return the current task for a given cpu.
+ * @cpu: the processor in question.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ */
+task_t *curr_task(int cpu)
+{
+       return cpu_curr(cpu);
+}
+
+/**
+ * set_curr_task - set the current task for a given cpu.
+ * @cpu: the processor in question.
+ * @p: the task pointer to set.
+ *
+ * Description: This function must only be used when non-maskable interrupts
+ * are serviced on a separate stack.  It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner.  This function
+ * must be called with all CPUs synchronized, and interrupts disabled; the
+ * caller must save the original value of the current task (see
+ * curr_task() above) and restore that value before reenabling interrupts and
+ * re-starting the system.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ */
+void set_curr_task(int cpu, task_t *p)
+{
+       cpu_curr(cpu) = p;
+}
+
+#endif
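For illustration only (not part of the patch): a sketch of the save/switch/
restore pattern the set_curr_task() comment describes, in the shape a
stopped-system (MCA-style) handler might use it.  example_mca_work() and
new_task are assumptions made up for the example; the real constraints are the
ones spelled out above (whole system stopped, interrupts disabled).

extern void example_mca_work(void);     /* hypothetical work on the separate stack */

static void example_switch_current(int cpu, task_t *new_task)
{
        task_t *orig = curr_task(cpu);  /* save the original value */

        set_curr_task(cpu, new_task);   /* non-blocking switch of "current" */
        example_mca_work();
        set_curr_task(cpu, orig);       /* restore before re-enabling interrupts */
}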