Merge branch 'linus' into tracing/core
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3816f21..652e8bd 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
        return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+                               struct sched_entity *b)
+{
+       return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        return se->vruntime - cfs_rq->min_vruntime;
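
The new entity_before() helper compares vruntimes through a signed difference rather than a raw "<". vruntime is an unsigned 64-bit counter that may wrap, and the signed-difference idiom keeps the ordering correct across a wrap as long as the two values are less than 2^63 apart. A standalone sketch of the idiom (illustrative only, not part of the patch; the values are chosen to straddle a wrap):

    #include <stdint.h>
    #include <stdio.h>

    /* Same comparison idiom as entity_before(), on plain uint64_t. */
    static int before(uint64_t a, uint64_t b)
    {
            return (int64_t)(a - b) < 0;
    }

    int main(void)
    {
            uint64_t older = UINT64_MAX - 5;        /* stamped just before the wrap */
            uint64_t newer = 10;                    /* stamped just after the wrap  */

            /* A raw "<" claims the newer stamp sorts first, which is wrong. */
            printf("raw compare: %d\n", newer < older);            /* prints 1 */
            /* The signed-difference form still orders the stamps correctly. */
            printf("before()   : %d\n", before(older, newer));     /* prints 1 */
            return 0;
    }
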
@@ -430,12 +436,13 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
        for_each_sched_entity(se) {
                struct load_weight *load;
+               struct load_weight lw;
 
                cfs_rq = cfs_rq_of(se);
                load = &cfs_rq->load;
 
                if (unlikely(!se->on_rq)) {
-                       struct load_weight lw = cfs_rq->load;
+                       lw = cfs_rq->load;
 
                        update_load_add(&lw, se->load.weight);
                        load = &lw;
@@ -604,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHEDSTATS
+       struct task_struct *tsk = NULL;
+
+       if (entity_is_task(se))
+               tsk = task_of(se);
+
        if (se->sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
-               struct task_struct *tsk = task_of(se);
 
                if ((s64)delta < 0)
                        delta = 0;
@@ -617,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                se->sleep_start = 0;
                se->sum_sleep_runtime += delta;
 
-               account_scheduler_latency(tsk, delta >> 10, 1);
+               if (tsk)
+                       account_scheduler_latency(tsk, delta >> 10, 1);
        }
        if (se->block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->block_start;
-               struct task_struct *tsk = task_of(se);
 
                if ((s64)delta < 0)
                        delta = 0;
@@ -632,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                se->block_start = 0;
                se->sum_sleep_runtime += delta;
 
-               /*
-                * Blocking time is in units of nanosecs, so shift by 20 to
-                * get a milliseconds-range estimation of the amount of
-                * time that the task spent sleeping:
-                */
-               if (unlikely(prof_on == SLEEP_PROFILING)) {
-
-                       profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
-                                    delta >> 20);
+               if (tsk) {
+                       /*
+                        * Blocking time is in units of nanosecs, so shift by
+                        * 20 to get a milliseconds-range estimation of the
+                        * amount of time that the task spent sleeping:
+                        */
+                       if (unlikely(prof_on == SLEEP_PROFILING)) {
+                               profile_hits(SLEEP_PROFILING,
+                                               (void *)get_wchan(tsk),
+                                               delta >> 20);
+                       }
+                       account_scheduler_latency(tsk, delta >> 10, 0);
                }
-               account_scheduler_latency(tsk, delta >> 10, 0);
        }
 #endif
 }
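
Both enqueue_sleeper() hunks share one motivation: with group scheduling a sched_entity can stand for a task group rather than a task, in which case task_of(se) is not meaningful, so tsk is resolved once up front and the per-task latency accounting is skipped when it is NULL. The shift counts in those calls are cheap unit conversions on a nanosecond delta: ">> 10" divides by 1024 (roughly microseconds), ">> 20" divides by 1048576 (roughly milliseconds). A small sketch of that arithmetic (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t delta_ns = 2500000000ULL;      /* 2.5 s of blocking time */

            /* delta >> 10: ~microseconds (divide by 1024 instead of 1000) */
            printf("approx us: %llu\n", (unsigned long long)(delta_ns >> 10));
            /* delta >> 20: ~milliseconds (divide by 1048576 instead of 1000000) */
            printf("approx ms: %llu\n", (unsigned long long)(delta_ns >> 20));
            /* exact conversions, for comparison */
            printf("exact  us: %llu\n", (unsigned long long)(delta_ns / 1000));
            printf("exact  ms: %llu\n", (unsigned long long)(delta_ns / 1000000));
            return 0;
    }
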
@@ -686,7 +699,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                         * all of which have the same weight.
                         */
                        if (sched_feat(NORMALIZED_SLEEPER) &&
-                                       task_of(se)->policy != SCHED_IDLE)
+                                       (!entity_is_task(se) ||
+                                        task_of(se)->policy != SCHED_IDLE))
                                thresh = calc_delta_fair(thresh, se);
 
                        vruntime -= thresh;
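
The place_entity() change applies the same guard in another spot: a group entity is not embedded in a task_struct, so it has no ->policy to inspect, and task_of() is only valid when entity_is_task() holds. Roughly, the pair amounts to a container_of() pattern along these lines (simplified sketch, not the kernel definitions):

    #include <stddef.h>

    struct cfs_rq;                            /* opaque in this sketch */

    struct sched_entity {
            struct cfs_rq *my_q;              /* owned runqueue; set for group entities only */
            /* ... */
    };

    struct task_struct {
            int policy;
            struct sched_entity se;           /* the entity embedded in a task */
            /* ... */
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* A task's entity owns no runqueue of its own. */
    static inline int entity_is_task(const struct sched_entity *se)
    {
            return se->my_q == NULL;
    }

    static inline struct task_struct *task_of(struct sched_entity *se)
    {
            /* Only meaningful when entity_is_task(se); for a group entity the
             * returned pointer does not reference a real task_struct at all. */
            return container_of(se, struct task_struct, se);
    }
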
@@ -1015,7 +1029,7 @@ static void yield_task_fair(struct rq *rq)
        /*
         * Already in the rightmost position?
         */
-       if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+       if (unlikely(!rightmost || entity_before(rightmost, se)))
                return;
 
        /*
@@ -1487,17 +1501,10 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 
        find_matching_se(&se, &pse);
 
-       while (se) {
-               BUG_ON(!pse);
-
-               if (wakeup_preempt_entity(se, pse) == 1) {
-                       resched_task(curr);
-                       break;
-               }
+       BUG_ON(!pse);
 
-               se = parent_entity(se);
-               pse = parent_entity(pse);
-       }
+       if (wakeup_preempt_entity(se, pse) == 1)
+               resched_task(curr);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
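
The check_preempt_wakeup() hunk can drop the upward walk because find_matching_se() is assumed to leave se and pse at the same depth of the group hierarchy, sharing a cfs_rq. From there, one wakeup_preempt_entity() call decides preemption; walking further up resolves both sides to the same parent entity, and comparing an entity with itself can never return 1. A toy sketch of why the old loop's extra iterations were no-ops (simplified, assumed hierarchy semantics):

    #include <stdio.h>

    struct entity {
            struct entity *parent;            /* group entity owning this one's cfs_rq */
            long vruntime;
    };

    /* Crude stand-in for wakeup_preempt_entity(): nonzero means "preempt". */
    static int preempts(const struct entity *curr, const struct entity *wakee)
    {
            return wakee->vruntime < curr->vruntime;
    }

    int main(void)
    {
            struct entity group = { NULL, 0 };
            struct entity se    = { &group, 100 };  /* current task's entity */
            struct entity pse   = { &group, 40 };   /* woken task's entity   */

            /* The matched level is the only comparison that can matter. */
            printf("matched level: %d\n", preempts(&se, &pse));              /* 1 */

            /* One step up, both sides are the same group entity, so the old
             * loop's further comparisons could never request preemption.    */
            printf("parent level : %d\n", preempts(se.parent, pse.parent));  /* 0 */
            return 0;
    }
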
@@ -1718,7 +1725,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
        /* 'curr' will be NULL if the child belongs to a different group */
        if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-                       curr && curr->vruntime < se->vruntime) {
+                       curr && entity_before(curr, se)) {
                /*
                 * Upon rescheduling, sched_class::put_prev_task() will place
                 * 'current' within the tree based on its new key value.