/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/latencytop.h>
#include <linux/sched.h>
/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 5000000ULL;
unsigned int normalized_sysctl_sched_latency = 5000000ULL;
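
/*
 * Illustrative example (added, not part of the original source): with the
 * default SCHED_TUNABLESCALING_LOG policy the base values above are
 * multiplied by (1 + ilog2(ncpus)), e.g. on a 4-CPU machine:
 *
 *	sysctl_sched_latency = 5ms * (1 + ilog2(4)) = 15ms
 *
 * The normalized_* copies keep the unscaled defaults so the effective
 * values can be re-derived when the number of online CPUs changes.
 */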

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 1000000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;

/*
 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 5;
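
/*
 * Worked example (added for clarity): with the unscaled defaults above,
 * 5000000 / 1000000 = 5, matching the initializer. sched_nr_latency_handler()
 * below re-derives the value whenever either sysctl is written.
 */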

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * preemption test can be made between sibling entities who are in the
	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of common
	 * parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
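
/*
 * Note (added): both helpers compare through a signed delta instead of
 * comparing the u64 values directly, so they keep working when vruntime
 * wraps around.  E.g. with min_vruntime = ULLONG_MAX - 10 and a wrapped
 * vruntime = 5, (s64)(5 - (ULLONG_MAX - 10)) = 16 > 0, so max_vruntime()
 * correctly treats the wrapped value as the later one.
 */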

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}
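
/*
 * Note (added): keying the rbtree on (vruntime - min_vruntime) rather than
 * on the raw u64 keeps the keys in a small signed range around zero, so the
 * '<' comparison in __enqueue_entity() below stays wraparound-safe.
 */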

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
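
/*
 * Note (added): insertion is O(log n) in the number of queued entities,
 * while the rb_leftmost cache lets __pick_next_entity() below return the
 * smallest-vruntime entity in O(1).
 */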

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}
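
/*
 * Worked example (added): NICE_0_LOAD is 1024.  A nice-0 task's delta
 * passes through unchanged, while for a nice -5 task (weight 3121 in the
 * prio_to_weight[] table) 1ms of wall time weighs in at roughly
 * 1000000 * 1024 / 3121 ~= 328000ns of vruntime, so heavier tasks accrue
 * vruntime more slowly and receive a proportionally larger CPU share.
 */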

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
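
/*
 * Worked example (added): with the unscaled defaults (l = 5ms, minimum
 * granularity 1ms, nl = 5): 4 runnable tasks keep the 5ms period, while
 * 8 runnable tasks stretch it to 8 * 1ms = 8ms so that no slice drops
 * below the minimum granularity.
 */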

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
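
/*
 * Worked example (added): two runnable nice-0 tasks (weight 1024 each)
 * within a 5ms period each get 5ms * 1024/2048 = 2.5ms; renice one of
 * them to -5 (weight 3121) and it gets 5ms * 3121/4145 ~= 3.8ms while
 * the nice-0 task keeps the remaining ~1.2ms.
 */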

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->wait_start);
	}
#endif
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	struct task_struct *tsk = NULL;

	if (entity_is_task(se))
		tsk = task_of(se);

	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		if (tsk) {
			if (tsk->in_iowait) {
				se->iowait_sum += delta;
				se->iowait_count++;
				trace_sched_stat_iowait(tsk, delta);
			}

			/*
			 * Blocking time is in units of nanoseconds, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial && sched_feat(FAIR_SLEEPERS)) {
		unsigned long thresh = sysctl_sched_latency;

		/*
		 * Convert the sleeper threshold into virtual time.
		 * SCHED_IDLE is a special sub-class. We care about
		 * fairness only relative to other SCHED_IDLE tasks,
		 * all of which have the same weight.
		 */
		if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
				 task_of(se)->policy != SCHED_IDLE))
			thresh = calc_delta_fair(thresh, se);

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

	se->vruntime = vruntime;
}
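
/*
 * Worked example (added): a newly forked task with START_DEBIT set starts
 * one weighted slice past min_vruntime, i.e. at the end of the current
 * period.  A task waking from sleep with GENTLE_FAIR_SLEEPERS is placed
 * sysctl_sched_latency/2 (2.5ms unscaled) before min_vruntime, which
 * bounds the credit a long sleeper can bring back with it.
 */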

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	account_entity_enqueue(cfs_rq, se);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
}

static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (!se || cfs_rq->last == se)
		cfs_rq->last = NULL;

	if (!se || cfs_rq->next == se)
		cfs_rq->next = NULL;
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	for_each_sched_entity(se)
		__clear_buddies(cfs_rq_of(se), se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	clear_buddies(cfs_rq, se);

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
	update_min_vruntime(cfs_rq);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	if (delta_exec < sysctl_sched_min_granularity)
		return;

	if (cfs_rq->nr_running > 1) {
		struct sched_entity *se = __pick_next_entity(cfs_rq);
		s64 delta = curr->vruntime - se->vruntime;

		if (delta > ideal_runtime)
			resched_task(rq_of(cfs_rq)->curr);
	}
}
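
/*
 * Worked example (added): with three runnable nice-0 tasks the ideal
 * runtime is 5ms / 3 ~= 1.67ms; once curr has run that long past
 * prev_sum_exec_runtime, the tick reschedules it.  The second test also
 * kicks a task whose vruntime lead over the leftmost entity already
 * exceeds a full slice, but never before it has run at least
 * sysctl_sched_min_granularity.
 */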

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);
	struct sched_entity *left = se;

	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	/*
	 * Prefer last buddy, try to return the CPU to a preempted task.
	 */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	clear_buddies(cfs_rq, se);

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		return;
	}
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (rq->curr != p)
			delta = max_t(s64, 10000LL, delta);

		hrtick_start(rq, delta);
	}
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}

	hrtick_update(rq);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
		sleep = 1;
	}

	hrtick_update(rq);
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(!rightmost || entity_before(rightmost, se)))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}

#ifdef CONFIG_SMP

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * The problem is that perfectly aligning the shares is rather expensive, hence
 * we try to avoid doing that too often - see update_shares(), which ratelimits
 * this change.
 *
 * We compensate this by not only taking the current delta into account, but
 * also considering the delta between when the shares were last adjusted and
 * now.
 *
 * We still saw a performance dip, some tracing taught us that between
 * cgroup:/ and cgroup:/foo balancing the number of affine wakeups increased
 * significantly. Therefore try to bias the error in direction of failing
 * the affine wakeup.
 */
static long effective_load(struct task_group *tg, int cpu,
		long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)
		return wl;

	/*
	 * By not taking the decrease of shares on the other cpu into
	 * account our error leans towards reducing the affine wakeups.
	 */
	if (!wl && sched_feat(ASYM_EFF_LOAD))
		return wl;

	for_each_sched_entity(se) {
		long S, rw, s, a, b;
		long more_w;

		/*
		 * Instead of using this increment, also add the difference
		 * between when the shares were last updated and now.
		 */
		more_w = se->my_q->load.weight - se->my_q->rq_weight;
		wl += more_w;
		wg += more_w;

		S = se->my_q->tg->shares;
		s = se->my_q->shares;
		rw = se->my_q->rq_weight;

		a = S*(rw + wl);
		b = S*rw + s*wg;

		wl = s*(a-b);

		if (likely(b))
			wl /= b;

		/*
		 * Assume the group is already running and will
		 * thus already be accounted for in the weight.
		 *
		 * That is, moving shares between CPUs, does not
		 * alter the group weight.
		 */
		wg = 0;
	}

	return wl;
}
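
/*
 * Worked derivation (added): with S the group's total shares, rw this
 * cpu's runqueue weight and s = S*rw/\Sum(rw_i) its current share, adding
 * wl here and wg group-wide gives a new share s' = S*(rw+wl) /
 * (\Sum(rw_i) + wg) = s*a/b with a and b as computed above.  The loop
 * therefore feeds wl = s' - s = s*(a-b)/b, the change in share, up to the
 * next level of the hierarchy, while wg is zeroed because the group
 * itself was already accounted in its parent's weight.
 */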

#else

static inline unsigned long effective_load(struct task_group *tg, int cpu,
		unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif

static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
	struct task_struct *curr = current;
	unsigned long this_load, load;
	int idx, this_cpu, prev_cpu;
	unsigned long tl_per_task;
	unsigned int imbalance;
	struct task_group *tg;
	unsigned long weight;
	int balanced;

	idx	  = sd->wake_idx;
	this_cpu  = smp_processor_id();
	prev_cpu  = task_cpu(p);
	load	  = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	if (sync) {
		if (sched_feat(SYNC_LESS) &&
		    (curr->se.avg_overlap > sysctl_sched_migration_cost ||
		     p->se.avg_overlap > sysctl_sched_migration_cost))
			sync = 0;
	} else {
		if (sched_feat(SYNC_MORE) &&
		    (curr->se.avg_overlap < sysctl_sched_migration_cost &&
		     p->se.avg_overlap < sysctl_sched_migration_cost))
			sync = 1;
	}

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		this_load += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	imbalance = 100 + (sd->imbalance_pct - 100) / 2;

	/*
	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
	 * due to the sync cause above having dropped this_load to 0, we'll
	 * always have an imbalance, but there's really nothing you can do
	 * about that, so that's good too.
	 *
	 * Otherwise check if either cpus are near enough in load to allow this
	 * task to be woken on this_cpu.
	 */
	balanced = !this_load ||
		100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced ||
	    (this_load <= load &&
	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(sd, ttwu_move_affine);
		schedstat_inc(p, se.nr_wakeups_affine);

		return 1;
	}
	return 0;
}
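
/*
 * Worked example (added): with imbalance_pct = 125 the effective factor
 * is 100 + (125-100)/2 = 112, so an affine wakeup counts as balanced when
 * this_cpu's load plus the woken task stays within 112% of prev_cpu's
 * load - a deliberate bias towards waking on the local, cache-hot cpu.
 */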

/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
		  int this_cpu, int load_idx)
{
	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int imbalance = 100 + (sd->imbalance_pct-100)/2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/* Skip over this group if it has no CPUs allowed */
		if (!cpumask_intersects(sched_group_cpus(group),
					&p->cpus_allowed))
			continue;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu(i, sched_group_cpus(group)) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

		if (local_group) {
			this_load = avg_load;
			this = group;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
		}
	}

	return idlest;
}

/*
 * Try and locate an idle CPU in the sched_domain.
 */
static int
select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
{
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int i;

	/*
	 * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
	 * test in select_task_rq_fair) and the prev_cpu is idle then that's
	 * always a better target than the current cpu.
	 */
	if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running)
		return prev_cpu;

	/*
	 * Otherwise, iterate the domain and find an eligible idle cpu.
	 */
	for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
		if (!cpu_rq(i)->cfs.nr_running) {
			target = i;
			break;
		}
	}

	return target;
}

/*
 * sched_balance_self: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, ie. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
{
	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int new_cpu = cpu;
	int want_affine = 0;
	int want_sd = 1;
	int sync = wake_flags & WF_SYNC;

	if (sd_flag & SD_BALANCE_WAKE) {
		if (sched_feat(AFFINE_WAKEUPS) &&
		    cpumask_test_cpu(cpu, &p->cpus_allowed))
			want_affine = 1;
		new_cpu = prev_cpu;
	}

	for_each_domain(cpu, tmp) {
		/*
		 * If power savings logic is enabled for a domain, see if we
		 * are not overloaded, if so, don't balance wider.
		 */
		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
			unsigned long power = 0;
			unsigned long nr_running = 0;
			unsigned long capacity;
			int i;

			for_each_cpu(i, sched_domain_span(tmp)) {
				power += power_of(i);
				nr_running += cpu_rq(i)->cfs.nr_running;
			}

			capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);

			if (tmp->flags & SD_POWERSAVINGS_BALANCE)
				nr_running /= 2;

			if (nr_running < capacity)
				want_sd = 0;
		}

		/*
		 * While iterating the domains looking for a spanning
		 * WAKE_AFFINE domain, adjust the affine target to any idle cpu
		 * in cache sharing domains along the way.
		 */
		if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
			int target = -1;

			/*
			 * If both cpu and prev_cpu are part of this domain,
			 * cpu is a valid SD_WAKE_AFFINE target.
			 */
			if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
				target = cpu;

			/*
			 * If there's an idle sibling in this domain, make that
			 * the wake_affine target instead of the current cpu.
			 */
			if (tmp->flags & SD_PREFER_SIBLING)
				target = select_idle_sibling(p, tmp, target);

			if (target >= 0) {
				if (tmp->flags & SD_WAKE_AFFINE) {
					affine_sd = tmp;
					want_affine = 0;
				}
				cpu = target;
			}
		}

		if (!want_sd && !want_affine)
			break;

		if (!(tmp->flags & sd_flag))
			continue;

		if (want_sd)
			sd = tmp;
	}

	if (sched_feat(LB_SHARES_UPDATE)) {
		/*
		 * Pick the largest domain to update shares over
		 */
		tmp = sd;
		if (affine_sd && (!tmp ||
				  cpumask_weight(sched_domain_span(affine_sd)) >
				  cpumask_weight(sched_domain_span(sd))))
			tmp = affine_sd;

		if (tmp)
			update_shares(tmp);
	}

	if (affine_sd && wake_affine(affine_sd, p, sync))
		return cpu;

	while (sd) {
		int load_idx = sd->forkexec_idx;
		struct sched_group *group;
		int weight;

		if (!(sd->flags & sd_flag)) {
			sd = sd->child;
			continue;
		}

		if (sd_flag & SD_BALANCE_WAKE)
			load_idx = sd->wake_idx;

		group = find_idlest_group(sd, p, cpu, load_idx);
		if (!group) {
			sd = sd->child;
			continue;
		}

		new_cpu = find_idlest_cpu(group, p, cpu);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
		}

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
		weight = cpumask_weight(sched_domain_span(sd));
		sd = NULL;
		for_each_domain(cpu, tmp) {
			if (weight <= cpumask_weight(sched_domain_span(tmp)))
				break;
			if (tmp->flags & sd_flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}

	return new_cpu;
}
#endif /* CONFIG_SMP */

/*
 * Adaptive granularity
 *
 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
 * with the limit of wakeup_gran -- when it never does a wakeup.
 *
 * So the smaller avg_wakeup is the faster we want this task to preempt,
 * but we don't want to treat the preemptee unfairly and therefore allow it
 * to run for at least the amount of time we'd like to run.
 *
 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
 *       wakeup.
 * NOTE: we use *nr_running to scale with load, this nicely matches the
 *       degrading latency on load.
 */
static unsigned long
adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
{
	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
	u64 gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;

	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
}

static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
		gran = adaptive_gran(curr, se);

	/*
	 * Since it's curr that is running now, convert the gran from
	 * real-time to virtual-time in its units.
	 */
	if (sched_feat(ASYM_GRAN)) {
		/*
		 * By using 'se' instead of 'curr' we penalize light tasks, so
		 * they get preempted easier. That is, if 'se' < 'curr' then
		 * the resulting gran will be larger, therefore penalizing the
		 * lighter, if otoh 'se' > 'curr' then the resulting gran will
		 * be smaller, again penalizing the lighter task.
		 *
		 * This is especially important for buddies when the leftmost
		 * task is higher priority than the buddy.
		 */
		if (unlikely(se->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, se);
	} else {
		if (unlikely(curr->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, curr);
	}

	return gran;
}

/*
 * Should 'se' preempt 'curr'.
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}
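
/*
 * Worked example (added): if curr->vruntime - se->vruntime is 1.5ms of
 * virtual time and the (scaled) wakeup granularity works out to 1ms, this
 * returns 1 and the waking entity preempts; a lead of only 0.5ms returns
 * 0 and curr keeps running.
 */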

static void set_last_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->last = se;
	}
}

static void set_next_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->next = se;
	}
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	int sync = wake_flags & WF_SYNC;
	int scale = cfs_rq->nr_running >= sched_nr_latency;

	if (unlikely(rt_prio(p->prio)))
		goto preempt;

	if (unlikely(p->sched_class != &fair_sched_class))
		return;

	if (unlikely(se == pse))
		return;

	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
		set_next_buddy(pse);

	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/*
	 * Batch and idle tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy != SCHED_NORMAL))
		return;

	/* Idle tasks are by definition preempted by everybody. */
	if (unlikely(curr->policy == SCHED_IDLE))
		goto preempt;

	if (sched_feat(WAKEUP_SYNC) && sync)
		goto preempt;

	if (sched_feat(WAKEUP_OVERLAP) &&
			se->avg_overlap < sysctl_sched_migration_cost &&
			pse->avg_overlap < sysctl_sched_migration_cost)
		goto preempt;

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	update_curr(cfs_rq);
	find_matching_se(&se, &pse);
	if (wakeup_preempt_entity(se, pse) == 1)
		goto preempt;

	return;

preempt:
	resched_task(curr);
	/*
	 * Only set the backward buddy when the current task is still
	 * on the rq. This can happen when a wakeup gets interleaved
	 * with schedule on the ->pre_schedule() or idle_balance()
	 * point, either of which can drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class,
	 * for obvious reasons it's a bad idea to schedule back to it.
	 */
	if (unlikely(!se->on_rq || curr == rq->idle))
		return;

	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
		set_last_buddy(se);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (!cfs_rq->nr_running)
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

/**************************************************
 * Fair scheduling class load-balancing methods:
 */
#ifdef CONFIG_SMP
/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
	struct task_struct *p = NULL;
	struct sched_entity *se;

	if (next == &cfs_rq->tasks)
		return NULL;

	se = list_entry(next, struct sched_entity, group_node);
	p = task_of(se);
	cfs_rq->balance_iterator = next->next;

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}

static unsigned long
__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move, struct sched_domain *sd,
		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
		struct cfs_rq *cfs_rq)
{
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;
	cfs_rq_iterator.arg = cfs_rq;

	return balance_tasks(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &cfs_rq_iterator);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	long rem_load_move = max_load_move;
	int busiest_cpu = cpu_of(busiest);
	struct task_group *tg;

	rcu_read_lock();
	update_h_load(busiest_cpu);

	list_for_each_entry_rcu(tg, &task_groups, list) {
		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
		u64 rem_load, moved_load;

		/*
		 * empty group
		 */
		if (!busiest_cfs_rq->task_weight)
			continue;

		rem_load = (u64)rem_load_move * busiest_weight;
		rem_load = div_u64(rem_load, busiest_h_load + 1);

		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
				rem_load, sd, idle, all_pinned, this_best_prio,
				tg->cfs_rq[busiest_cpu]);

		if (!moved_load)
			continue;

		moved_load *= busiest_h_load;
		moved_load = div_u64(moved_load, busiest_weight + 1);

		rem_load_move -= moved_load;
		if (rem_load_move < 0)
			break;
	}
	rcu_read_unlock();

	return max_load_move - rem_load_move;
}
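
/*
 * Worked example (added): h_load is the group's hierarchical load
 * contribution at the root.  If a group cfs_rq carries load.weight = 2048
 * but only h_load = 512 at root level, a request to move rem_load_move =
 * 256 of root load is scaled to 256 * 2048 / (512+1) ~= 1022 of local
 * queue weight before calling __load_balance_fair(), and the amount
 * actually moved is scaled back the same way.
 */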
#else
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return __load_balance_fair(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &busiest->cfs);
}
#endif

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}

static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();
}

#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(current);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);

	if (unlikely(task_cpu(p) != this_cpu))
		__set_task_cpu(p, this_cpu);

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_fair(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void moved_group_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, &p->se, 1);
}
#endif

unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));

	return rr_interval;
}

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.load_balance		= load_balance_fair,
	.move_one_task		= move_one_task_fair,
	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.moved_group		= moved_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif