/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}
static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
		if (!rq->rt.overloaded) {
			rt_set_overload(rq);
			rq->rt.overloaded = 1;
		}
	} else if (rq->rt.overloaded) {
		rt_clear_overload(rq);
		rq->rt.overloaded = 0;
	}
}
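/*
 * Illustrative example (not from this file): a runqueue counts as
 * "overloaded" only when it has more than one runnable RT task *and* at
 * least one of them can migrate. Two SCHED_FIFO tasks each pinned to
 * this CPU give rt_nr_running == 2 but rt_nr_migratory == 0, so no
 * overload is signalled and other CPUs will not try to pull from us.
 */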
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

#else

static inline
void enqueue_pushable_task(struct rq *rq, struct task_struct *p) {}

static inline
void dequeue_pushable_task(struct rq *rq, struct task_struct *p) {}

#endif /* CONFIG_SMP */
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}
#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}
#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}
static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}
#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}
#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		spin_unlock(&iter->rt_runtime_lock);
	}
	spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}
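/*
 * Worked example with assumed numbers: with a 1s period and 4 CPUs in
 * the root domain (weight == 4), a neighbour holding 400ms of spare
 * runtime (rt_runtime - rt_time) gives up 400ms / 4 = 100ms to the
 * starved rt_rq; borrowing stops early once our rt_runtime reaches the
 * full period.
 */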
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		spin_lock(&rt_b->rt_runtime_lock);
		spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		spin_unlock(&rt_rq->rt_runtime_lock);
		spin_unlock(&rt_b->rt_runtime_lock);
	}
}
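/*
 * Reclaim example (assumed numbers): if this rt_rq started the period
 * with 950ms but now holds only 700ms, want == 250ms. The loop above
 * takes min(iter->rt_runtime, want) from each neighbour until want hits
 * zero; rt_runtime is then parked at RUNTIME_INF so nobody lends to, or
 * borrows from, an offlined runqueue.
 */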
static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	spin_unlock_irqrestore(&rq->lock, flags);
}
static void __enable_runtime(struct rq *rq)
{
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		spin_lock(&rt_b->rt_runtime_lock);
		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		spin_unlock(&rt_rq->rt_runtime_lock);
		spin_unlock(&rt_b->rt_runtime_lock);
	}
}
static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	spin_unlock_irqrestore(&rq->lock, flags);
}
static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running)
			idle = 0;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		spin_unlock(&rq->lock);
	}

	return idle;
}
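/*
 * Replenishment example (illustrative numbers): if a throttled rt_rq
 * accumulated rt_time == 2.5 * runtime while the timer overran two
 * periods (overrun == 2), rt_time drops by min(2.5, 2.0) * runtime to
 * 0.5 * runtime. That is below runtime, so the throttle is lifted and
 * the rt_rq re-enqueued.
 */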
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
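/*
 * Net effect, in brief: once rt_time exceeds the (possibly rebalanced)
 * runtime within the current period, the rt_rq is dequeued here and
 * stays off the runqueue until do_sched_rt_period_timer() pays the debt
 * back; PI-boosted groups are exempted via rt_rq_throttled().
 */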
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}
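/*
 * Accounting sketch (illustrative): an RT task that ran for
 * delta_exec == 3ms since exec_start charges those 3ms to the rt_rq of
 * every sched_rt_entity on its hierarchy path; the first level whose
 * budget is exceeded throttles and reschedules curr.
 */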
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED

static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);

static inline int next_prio(struct rq *rq)
{
	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);

	if (next && rt_prio(next->prio))
		return next->prio;
	else
		return MAX_RT_PRIO;
}
#endif
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);
#ifdef CONFIG_SMP
	struct rq *rq = rq_of_rt_rq(rt_rq);
#endif

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (prio < rt_rq->highest_prio.curr) {

		/*
		 * If the new task is higher in priority than anything on the
		 * run-queue, we have a new high that must be published to
		 * the world. We also know that the previous high becomes
		 * our next-highest.
		 */
		rt_rq->highest_prio.next = rt_rq->highest_prio.curr;
		rt_rq->highest_prio.curr = prio;
#ifdef CONFIG_SMP
		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
#endif
	} else if (prio == rt_rq->highest_prio.curr)
		/*
		 * If the next task is equal in priority to the highest on
		 * the run-queue, then we implicitly know that the next highest
		 * task cannot be any lower than current.
		 */
		rt_rq->highest_prio.next = prio;
	else if (prio < rt_rq->highest_prio.next)
		/*
		 * Otherwise, we need to recompute next-highest.
		 */
		rt_rq->highest_prio.next = next_prio(rq);
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory++;

	update_rt_migration(rq);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
#else
	start_rt_bandwidth(&def_rt_bandwidth);
#endif
}
static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
#ifdef CONFIG_SMP
	struct rq *rq = rq_of_rt_rq(rt_rq);
	int highest_prio = rt_rq->highest_prio.curr;
#endif

	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_rq->rt_nr_running) {
		int prio = rt_se_prio(rt_se);

		WARN_ON(prio < rt_rq->highest_prio.curr);

		/*
		 * This may have been our highest or next-highest priority
		 * task and therefore we may have some recomputation to do.
		 */
		if (prio == rt_rq->highest_prio.curr) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

		if (prio <= rt_rq->highest_prio.next)
			rt_rq->highest_prio.next = next_prio(rq);
	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory--;

	if (rq->online && rt_rq->highest_prio.curr != highest_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}
static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}
/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se);
	}
}
/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);

	inc_cpu_load(rq, p->se.load.weight);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);

	dec_cpu_load(rq, p->se.load.weight);
}
/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}
static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. Even if
	 * the RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some of an RT task
	 * that is just being woken and probably will have
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 */
	return task_cpu(p);
}
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	cpumask_var_t mask;

	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, mask))
		goto free;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
		goto free;

	/*
	 * There appears to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
free:
	free_cpumask_var(mask);
}

#endif /* CONFIG_SMP */
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !need_resched())
		check_preempt_equal_prio(rq, p);
#endif
}
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}
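/*
 * Example of the O(1) pick above: with entities queued at priorities 10
 * and 40, sched_find_first_bit() returns 10 (lower value == higher RT
 * priority) and we take the head of array->queue[10]; list order gives
 * round-robin within a single priority level.
 */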
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;

	return p;
}
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

	return p;
}
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active.
	 */
	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}
/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * Only consider CPUs that are usable for migration.
	 * I guess we might want to change cpupri_find() to ignore those
	 * in the first place.
	 */
	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int best_cpu;

			cpumask_and(&domain_mask, sched_domain_span(sd),
				    lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}
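/*
 * Selection order illustrated (hypothetical topology): the task last ran
 * on CPU2 and cpupri reports CPUs {2, 5} as running the lowest-priority
 * work. CPU2 is in lowest_mask, so it is returned immediately for cache
 * locality. Were it not, the SD_WAKE_AFFINE domains around CPU2 would be
 * walked outwards, preferring this_cpu, then the first candidate that
 * shares a domain, before falling back to any compatible CPU.
 */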
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}
static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->rt.nr_cpus_allowed <= 1);

	BUG_ON(!p->se.on_rq);
	BUG_ON(!rt_task(p));

	return p;
}
/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * If we get here, the task hasn't moved at all, but
			 * it has failed to push. We will not try again,
			 * since the other cpus will pull from us when they
			 * are ready.
			 */
			dequeue_pushable_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return 1;
}
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}
static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq.
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue.
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}
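/*
 * Pull example (illustrative): this CPU is dropping to a prio-50 task
 * while CPU3 is overloaded with runnable RT tasks at prios 20 and 30.
 * CPU3's highest_prio.next (30) beats our 50, so we lock CPU3's rq, pick
 * its second-highest task (prio 30; the prio-20 task keeps running
 * there) and migrate it here.
 */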
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
		pull_rt_task(rq);
}
/*
 * assumes rq->lock is held
 */
static int needs_post_schedule_rt(struct rq *rq)
{
	return has_pushable_tasks(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	/*
	 * This is only called if needs_post_schedule_rt() indicates that
	 * we need to push tasks away.
	 */
	spin_lock_irq(&rq->lock);
	push_rt_tasks(rq);
	spin_unlock_irq(&rq->lock);
}
/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now.
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->rt.nr_cpus_allowed > 1)
		push_rt_tasks(rq);
}
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}
static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	int weight = cpumask_weight(new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if (!task_current(rq, p)) {
			/*
			 * Make sure we dequeue this task from the pushable list
			 * before going further. It will either remain off of
			 * the list because we are no longer pushable, or it
			 * will be requeued.
			 */
			if (p->rt.nr_cpus_allowed > 1)
				dequeue_pushable_task(rq, p);

			/*
			 * Requeue if our weight is changing and still > 1
			 */
			if (weight > 1)
				enqueue_pushable_task(rq, p);
		}

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->rt.nr_cpus_allowed = weight;
}
/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}
/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}

static inline void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
				       GFP_KERNEL, cpu_to_node(i));
}
#endif /* CONFIG_SMP */
/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}
/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}
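/*
 * Worked example (assumed limits): an RLIMIT_RTTIME soft limit of
 * 500000us with HZ == 1000 gives next = DIV_ROUND_UP(500000, 1000) ==
 * 500 ticks; once p->rt.timeout exceeds 500 ticks of uninterrupted RT
 * runtime, sched_exp is armed so the posix-cpu-timer path can deliver
 * SIGXCPU (and eventually SIGKILL at the hard limit).
 */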
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p, 0);
		set_tsk_need_resched(p);
	}
}
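/*
 * Example: with HZ == 1000, DEF_TIMESLICE (100 * HZ / 1000) is 100
 * ticks, so a SCHED_RR task runs at most ~100ms before being moved to
 * the tail of its priority queue -- and only when a peer of the same
 * priority is queued (run_list.prev != run_list.next); a lone task just
 * gets a fresh slice. SCHED_FIFO tasks returned early above and never
 * expire.
 */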
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}
static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.needs_post_schedule	= needs_post_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};
#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

static void print_rt_stats(struct seq_file *m, int cpu)
{
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */