Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
diff --git a/kernel/sched.c b/kernel/sched.c
index b533d6d..6c10fa7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -61,6 +61,7 @@
 #include <linux/delayacct.h>
 #include <linux/reciprocal_div.h>
 #include <linux/unistd.h>
+#include <linux/pagemap.h>
 
 #include <asm/tlb.h>
 
@@ -668,7 +669,7 @@ static u64 div64_likely32(u64 divident, unsigned long divisor)
 /*
  * Shift right and round:
  */
-#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
 
 static unsigned long
 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
@@ -684,10 +685,10 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
         * Check whether we'd overflow the 64-bit multiplication:
         */
        if (unlikely(tmp > WMULT_CONST))
-               tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+               tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
                        WMULT_SHIFT/2);
        else
-               tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT);
+               tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
 
        return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
 }
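
The rename from RSR() to SRR() is cosmetic; the macro is still a rounding right shift, and calc_delta_mine() keeps splitting the WMULT_SHIFT shift in two to dodge 64-bit overflow. A minimal user-space sketch (not kernel code, values made up) of why the macro is preferred over a plain shift:

#include <stdio.h>

/* Same definition as the renamed kernel macro. */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

int main(void)
{
	unsigned long x = 7;

	/* A plain shift truncates: 7 >> 2 == 1. */
	printf("truncated: %lu\n", x >> 2);

	/* SRR() adds half the divisor first, so it rounds to nearest:
	 * (7 + 2) >> 2 == 2, matching 7/4 = 1.75 rounded up. */
	printf("rounded:   %lu\n", SRR(x, 2));

	return 0;
}
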
@@ -858,7 +859,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-       task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
        p->se.wait_runtime = 0;
 
        if (task_has_rt_policy(p)) {
@@ -1683,6 +1683,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
        p->prio = effective_prio(p);
 
+       if (rt_prio(p->prio))
+               p->sched_class = &rt_sched_class;
+       else
+               p->sched_class = &fair_sched_class;
+
        if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
                        (clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
                        !current->se.on_rq) {
@@ -2512,7 +2517,7 @@ group_next:
         * a think about bumping its value to force at least one task to be
         * moved
         */
-       if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
+       if (*imbalance < busiest_load_per_task) {
                unsigned long tmp, pwr_now, pwr_move;
                unsigned int imbn;
 
@@ -2564,10 +2569,8 @@ small_imbalance:
                pwr_move /= SCHED_LOAD_SCALE;
 
                /* Move if we gain throughput */
-               if (pwr_move <= pwr_now)
-                       goto out_balanced;
-
-               *imbalance = busiest_load_per_task;
+               if (pwr_move > pwr_now)
+                       *imbalance = busiest_load_per_task;
        }
 
        return busiest;
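
With the out_balanced exit dropped, the small-imbalance path now leaves *imbalance alone when moving a task would not help and still returns the busiest group. A standalone sketch of the new decision, with made-up throughput numbers standing in for the scaled pwr_now/pwr_move values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical per-group throughput before and after moving one task. */
	unsigned long pwr_now  = 180;
	unsigned long pwr_move = 200;
	unsigned long busiest_load_per_task = 128;
	unsigned long imbalance = 0;

	/* New behaviour: bump the imbalance only when the move gains
	 * throughput; otherwise keep it unchanged instead of giving up. */
	if (pwr_move > pwr_now)
		imbalance = busiest_load_per_task;

	printf("imbalance = %lu\n", imbalance);

	return 0;
}
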
@@ -4553,10 +4556,7 @@ asmlinkage long sys_sched_yield(void)
        struct rq *rq = this_rq_lock();
 
        schedstat_inc(rq, yld_cnt);
-       if (unlikely(rq->nr_running == 1))
-               schedstat_inc(rq, yld_act_empty);
-       else
-               current->sched_class->yield_task(rq, current);
+       current->sched_class->yield_task(rq, current);
 
        /*
         * Since we are going to call schedule() anyway, there's