2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR policies)
8 static inline int rt_overloaded(struct rq *rq)
10 return atomic_read(&rq->rd->rto_count);
13 static inline void rt_set_overload(struct rq *rq)
18 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
20 * Make sure the mask is visible before we set
21 * the overload count. That is checked to determine
22 * if we should look at the mask. It would be a shame
23 * if we looked at the mask, but the mask was not updated yet.
27 atomic_inc(&rq->rd->rto_count);
30 static inline void rt_clear_overload(struct rq *rq)
35 /* the order here really doesn't matter */
36 atomic_dec(&rq->rd->rto_count);
37 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
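/*
 * A runqueue is considered "overloaded" when it has more than one runnable
 * RT task and at least one of them may run elsewhere (rt_nr_migratory).
 * update_rt_migration() below keeps rq->rt.overloaded and the root domain's
 * rto_mask/rto_count in sync with that state, so pull_rt_task() on other
 * CPUs knows where to look.
 */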
40 static void update_rt_migration(struct rq *rq)
42 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
43 if (!rq->rt.overloaded) {
45 rq->rt.overloaded = 1;
47 } else if (rq->rt.overloaded) {
48 rt_clear_overload(rq);
49 rq->rt.overloaded = 0;
52 #endif /* CONFIG_SMP */
54 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
56 return container_of(rt_se, struct task_struct, rt);
59 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
61 return !list_empty(&rt_se->run_list);
64 #ifdef CONFIG_RT_GROUP_SCHED
66 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
71 return rt_rq->rt_runtime;
74 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
76 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
79 #define for_each_leaf_rt_rq(rt_rq, rq) \
80 list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
82 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
87 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
92 #define for_each_sched_rt_entity(rt_se) \
93 for (; rt_se; rt_se = rt_se->parent)
95 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
100 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
101 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
103 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
105 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
106 struct sched_rt_entity *rt_se = rt_rq->rt_se;
108 if (rt_rq->rt_nr_running) {
109 if (rt_se && !on_rt_rq(rt_se))
110 enqueue_rt_entity(rt_se);
111 if (rt_rq->highest_prio.curr < curr->prio)
116 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
118 struct sched_rt_entity *rt_se = rt_rq->rt_se;
120 if (rt_se && on_rt_rq(rt_se))
121 dequeue_rt_entity(rt_se);
124 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
126 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
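/*
 * A group rt_rq that holds priority-boosted (PI) entities is never treated
 * as throttled: rt_nr_boosted keeps boosted tasks runnable even when the
 * group has exhausted its bandwidth.
 */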
129 static int rt_se_boosted(struct sched_rt_entity *rt_se)
131 struct rt_rq *rt_rq = group_rt_rq(rt_se);
132 struct task_struct *p;
135 return !!rt_rq->rt_nr_boosted;
137 p = rt_task_of(rt_se);
138 return p->prio != p->normal_prio;
142 static inline const struct cpumask *sched_rt_period_mask(void)
144 return cpu_rq(smp_processor_id())->rd->span;
147 static inline const struct cpumask *sched_rt_period_mask(void)
149 return cpu_online_mask;
154 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
156 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
159 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
161 return &rt_rq->tg->rt_bandwidth;
164 #else /* !CONFIG_RT_GROUP_SCHED */
166 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
168 return rt_rq->rt_runtime;
171 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
173 return ktime_to_ns(def_rt_bandwidth.rt_period);
176 #define for_each_leaf_rt_rq(rt_rq, rq) \
177 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
181 return container_of(rt_rq, struct rq, rt);
184 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
186 struct task_struct *p = rt_task_of(rt_se);
187 struct rq *rq = task_rq(p);
192 #define for_each_sched_rt_entity(rt_se) \
193 for (; rt_se; rt_se = NULL)
195 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
200 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
202 if (rt_rq->rt_nr_running)
203 resched_task(rq_of_rt_rq(rt_rq)->curr);
206 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
210 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
212 return rt_rq->rt_throttled;
215 static inline const struct cpumask *sched_rt_period_mask(void)
217 return cpu_online_mask;
221 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
223 return &cpu_rq(cpu)->rt;
226 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
228 return &def_rt_bandwidth;
231 #endif /* CONFIG_RT_GROUP_SCHED */
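/*
 * Illustrative example (default sysctl values, not taken from this file):
 * with /proc/sys/kernel/sched_rt_period_us = 1000000 and
 * sched_rt_runtime_us = 950000, RT tasks may consume at most 0.95s of CPU
 * time per 1s period on each runqueue; the helpers below let a starved
 * rt_rq borrow part of a neighbour's unused share of that budget.
 */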
235 * We ran out of runtime, see if we can borrow some from our neighbours.
237 static int do_balance_runtime(struct rt_rq *rt_rq)
239 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
240 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
241 int i, weight, more = 0;
244 weight = cpumask_weight(rd->span);
246 spin_lock(&rt_b->rt_runtime_lock);
247 rt_period = ktime_to_ns(rt_b->rt_period);
248 for_each_cpu(i, rd->span) {
249 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
255 spin_lock(&iter->rt_runtime_lock);
257 * Either all rqs have inf runtime and there's nothing to steal,
258 * or __disable_runtime() below sets a specific rq to inf to
259 * indicate it's been disabled and to disallow stealing.
261 if (iter->rt_runtime == RUNTIME_INF)
265 * From runqueues with spare time, take 1/n part of their
266 * spare time, but no more than our period.
268 diff = iter->rt_runtime - iter->rt_time;
270 diff = div_u64((u64)diff, weight);
271 if (rt_rq->rt_runtime + diff > rt_period)
272 diff = rt_period - rt_rq->rt_runtime;
273 iter->rt_runtime -= diff;
274 rt_rq->rt_runtime += diff;
276 if (rt_rq->rt_runtime == rt_period) {
277 spin_unlock(&iter->rt_runtime_lock);
282 spin_unlock(&iter->rt_runtime_lock);
284 spin_unlock(&rt_b->rt_runtime_lock);
290 * Ensure this RQ takes back all the runtime it lent to its neighbours.
292 static void __disable_runtime(struct rq *rq)
294 struct root_domain *rd = rq->rd;
297 if (unlikely(!scheduler_running))
300 for_each_leaf_rt_rq(rt_rq, rq) {
301 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
305 spin_lock(&rt_b->rt_runtime_lock);
306 spin_lock(&rt_rq->rt_runtime_lock);
308 * Either we're all inf and nobody needs to borrow, or we're
309 * already disabled and thus have nothing to do, or we have
310 * exactly the right amount of runtime to take out.
312 if (rt_rq->rt_runtime == RUNTIME_INF ||
313 rt_rq->rt_runtime == rt_b->rt_runtime)
315 spin_unlock(&rt_rq->rt_runtime_lock);
318 * Calculate the difference between what we started out with
319 * and what we currently have; that's the amount of runtime
320 * we lent out and now have to reclaim.
322 want = rt_b->rt_runtime - rt_rq->rt_runtime;
325 * Greedy reclaim, take back as much as we can.
327 for_each_cpu(i, rd->span) {
328 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
332 * Can't reclaim from ourselves or disabled runqueues.
334 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
337 spin_lock(&iter->rt_runtime_lock);
339 diff = min_t(s64, iter->rt_runtime, want);
340 iter->rt_runtime -= diff;
343 iter->rt_runtime -= want;
346 spin_unlock(&iter->rt_runtime_lock);
352 spin_lock(&rt_rq->rt_runtime_lock);
354 * We cannot be left wanting - that would mean some runtime
355 * leaked out of the system.
360 * Disable all the borrow logic by pretending we have inf
361 * runtime - in which case borrowing doesn't make sense.
363 rt_rq->rt_runtime = RUNTIME_INF;
364 spin_unlock(&rt_rq->rt_runtime_lock);
365 spin_unlock(&rt_b->rt_runtime_lock);
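/*
 * disable_runtime()/enable_runtime() are irq-safe wrappers around the
 * __ variants for callers that do not already hold rq->lock (the
 * rq_offline_rt()/rq_online_rt() paths further down call the __ variants
 * directly).
 */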
369 static void disable_runtime(struct rq *rq)
373 spin_lock_irqsave(&rq->lock, flags);
374 __disable_runtime(rq);
375 spin_unlock_irqrestore(&rq->lock, flags);
378 static void __enable_runtime(struct rq *rq)
382 if (unlikely(!scheduler_running))
386 * Reset each runqueue's bandwidth settings
388 for_each_leaf_rt_rq(rt_rq, rq) {
389 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
391 spin_lock(&rt_b->rt_runtime_lock);
392 spin_lock(&rt_rq->rt_runtime_lock);
393 rt_rq->rt_runtime = rt_b->rt_runtime;
395 rt_rq->rt_throttled = 0;
396 spin_unlock(&rt_rq->rt_runtime_lock);
397 spin_unlock(&rt_b->rt_runtime_lock);
401 static void enable_runtime(struct rq *rq)
405 spin_lock_irqsave(&rq->lock, flags);
406 __enable_runtime(rq);
407 spin_unlock_irqrestore(&rq->lock, flags);
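/*
 * balance_runtime() is called with rt_rq->rt_runtime_lock held; it drops
 * that lock around do_balance_runtime() because borrowing has to take the
 * per-rt_rq locks of other CPUs as well.
 */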
410 static int balance_runtime(struct rt_rq *rt_rq)
414 if (rt_rq->rt_time > rt_rq->rt_runtime) {
415 spin_unlock(&rt_rq->rt_runtime_lock);
416 more = do_balance_runtime(rt_rq);
417 spin_lock(&rt_rq->rt_runtime_lock);
422 #else /* !CONFIG_SMP */
423 static inline int balance_runtime(struct rt_rq *rt_rq)
427 #endif /* CONFIG_SMP */
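/*
 * Runs from the rt_bandwidth period timer: refresh rt_time for every rt_rq
 * in the period mask, unthrottle the ones that fit in their (possibly
 * rebalanced) runtime again, and return whether the timer may go idle.
 */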
429 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
432 const struct cpumask *span;
434 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
437 span = sched_rt_period_mask();
438 for_each_cpu(i, span) {
440 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
441 struct rq *rq = rq_of_rt_rq(rt_rq);
443 spin_lock(&rq->lock);
444 if (rt_rq->rt_time) {
447 spin_lock(&rt_rq->rt_runtime_lock);
448 if (rt_rq->rt_throttled)
449 balance_runtime(rt_rq);
450 runtime = rt_rq->rt_runtime;
451 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
452 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
453 rt_rq->rt_throttled = 0;
456 if (rt_rq->rt_time || rt_rq->rt_nr_running)
458 spin_unlock(&rt_rq->rt_runtime_lock);
459 } else if (rt_rq->rt_nr_running)
463 sched_rt_rq_enqueue(rt_rq);
464 spin_unlock(&rq->lock);
470 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
472 #ifdef CONFIG_RT_GROUP_SCHED
473 struct rt_rq *rt_rq = group_rt_rq(rt_se);
476 return rt_rq->highest_prio.curr;
479 return rt_task_of(rt_se)->prio;
482 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
484 u64 runtime = sched_rt_runtime(rt_rq);
486 if (rt_rq->rt_throttled)
487 return rt_rq_throttled(rt_rq);
489 if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
492 balance_runtime(rt_rq);
493 runtime = sched_rt_runtime(rt_rq);
494 if (runtime == RUNTIME_INF)
497 if (rt_rq->rt_time > runtime) {
498 rt_rq->rt_throttled = 1;
499 if (rt_rq_throttled(rt_rq)) {
500 sched_rt_rq_dequeue(rt_rq);
509 * Update the current task's runtime statistics. Skip current tasks that
510 * are not in our scheduling class.
512 static void update_curr_rt(struct rq *rq)
514 struct task_struct *curr = rq->curr;
515 struct sched_rt_entity *rt_se = &curr->rt;
516 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
519 if (!task_has_rt_policy(curr))
522 delta_exec = rq->clock - curr->se.exec_start;
523 if (unlikely((s64)delta_exec < 0))
526 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
528 curr->se.sum_exec_runtime += delta_exec;
529 account_group_exec_runtime(curr, delta_exec);
531 curr->se.exec_start = rq->clock;
532 cpuacct_charge(curr, delta_exec);
534 if (!rt_bandwidth_enabled())
537 for_each_sched_rt_entity(rt_se) {
538 rt_rq = rt_rq_of_se(rt_se);
540 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
541 spin_lock(&rt_rq->rt_runtime_lock);
542 rt_rq->rt_time += delta_exec;
543 if (sched_rt_runtime_exceeded(rt_rq))
545 spin_unlock(&rt_rq->rt_runtime_lock);
550 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
552 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
554 static inline int next_prio(struct rq *rq)
556 struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
558 if (next && rt_prio(next->prio))
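/*
 * inc_rt_tasks()/dec_rt_tasks() do the per-rt_rq accounting when an entity
 * is (de)queued: rt_nr_running, the cached highest/next-highest priority,
 * the migratory and boosted counts, cpupri and the overload state.
 */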
566 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
568 int prio = rt_se_prio(rt_se);
570 struct rq *rq = rq_of_rt_rq(rt_rq);
573 WARN_ON(!rt_prio(prio));
574 rt_rq->rt_nr_running++;
575 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
576 if (prio < rt_rq->highest_prio.curr) {
579 * If the new task is higher in priority than anything on the
580 * run-queue, we have a new high that must be published to
581 * the world. We also know that the previous high becomes our next-highest.
584 rt_rq->highest_prio.next = rt_rq->highest_prio.curr;
585 rt_rq->highest_prio.curr = prio;
588 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
590 } else if (prio == rt_rq->highest_prio.curr)
592 * If the next task is equal in priority to the highest on
593 * the run-queue, then we implicitly know that the next highest
594 * task cannot be any lower than current
596 rt_rq->highest_prio.next = prio;
597 else if (prio < rt_rq->highest_prio.next)
599 * Otherwise, we need to recompute next-highest
601 rt_rq->highest_prio.next = next_prio(rq);
604 if (rt_se->nr_cpus_allowed > 1)
605 rq->rt.rt_nr_migratory++;
607 update_rt_migration(rq);
609 #ifdef CONFIG_RT_GROUP_SCHED
610 if (rt_se_boosted(rt_se))
611 rt_rq->rt_nr_boosted++;
614 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
616 start_rt_bandwidth(&def_rt_bandwidth);
621 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
624 struct rq *rq = rq_of_rt_rq(rt_rq);
625 int highest_prio = rt_rq->highest_prio.curr;
628 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
629 WARN_ON(!rt_rq->rt_nr_running);
630 rt_rq->rt_nr_running--;
631 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
632 if (rt_rq->rt_nr_running) {
633 int prio = rt_se_prio(rt_se);
635 WARN_ON(prio < rt_rq->highest_prio.curr);
638 * This may have been our highest or next-highest priority
639 * task and therefore we may have some recomputation to do
641 if (prio == rt_rq->highest_prio.curr) {
642 struct rt_prio_array *array = &rt_rq->active;
644 rt_rq->highest_prio.curr =
645 sched_find_first_bit(array->bitmap);
648 if (prio <= rt_rq->highest_prio.next)
649 rt_rq->highest_prio.next = next_prio(rq);
651 rt_rq->highest_prio.curr = MAX_RT_PRIO;
654 if (rt_se->nr_cpus_allowed > 1)
655 rq->rt.rt_nr_migratory--;
657 if (rq->online && rt_rq->highest_prio.curr != highest_prio)
658 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
660 update_rt_migration(rq);
661 #endif /* CONFIG_SMP */
662 #ifdef CONFIG_RT_GROUP_SCHED
663 if (rt_se_boosted(rt_se))
664 rt_rq->rt_nr_boosted--;
666 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
670 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
672 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
673 struct rt_prio_array *array = &rt_rq->active;
674 struct rt_rq *group_rq = group_rt_rq(rt_se);
675 struct list_head *queue = array->queue + rt_se_prio(rt_se);
678 * Don't enqueue the group if it's throttled, or when empty.
679 * The latter is a consequence of the former when a child group
680 * gets throttled and the current group doesn't have any other active members.
683 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
686 list_add_tail(&rt_se->run_list, queue);
687 __set_bit(rt_se_prio(rt_se), array->bitmap);
689 inc_rt_tasks(rt_se, rt_rq);
692 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
694 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
695 struct rt_prio_array *array = &rt_rq->active;
697 list_del_init(&rt_se->run_list);
698 if (list_empty(array->queue + rt_se_prio(rt_se)))
699 __clear_bit(rt_se_prio(rt_se), array->bitmap);
701 dec_rt_tasks(rt_se, rt_rq);
705 * Because the prio of an upper entry depends on the lower
706 * entries, we must remove entries top-down.
708 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
710 struct sched_rt_entity *back = NULL;
712 for_each_sched_rt_entity(rt_se) {
717 for (rt_se = back; rt_se; rt_se = rt_se->back) {
719 __dequeue_rt_entity(rt_se);
723 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
725 dequeue_rt_stack(rt_se);
726 for_each_sched_rt_entity(rt_se)
727 __enqueue_rt_entity(rt_se);
730 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
732 dequeue_rt_stack(rt_se);
734 for_each_sched_rt_entity(rt_se) {
735 struct rt_rq *rt_rq = group_rt_rq(rt_se);
737 if (rt_rq && rt_rq->rt_nr_running)
738 __enqueue_rt_entity(rt_se);
743 * Adding/removing a task to/from a priority array:
745 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
747 struct sched_rt_entity *rt_se = &p->rt;
752 enqueue_rt_entity(rt_se);
754 inc_cpu_load(rq, p->se.load.weight);
757 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
759 struct sched_rt_entity *rt_se = &p->rt;
762 dequeue_rt_entity(rt_se);
764 dec_cpu_load(rq, p->se.load.weight);
768 * Put task to the end of the run list without the overhead of dequeue
769 * followed by enqueue.
772 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
774 if (on_rt_rq(rt_se)) {
775 struct rt_prio_array *array = &rt_rq->active;
776 struct list_head *queue = array->queue + rt_se_prio(rt_se);
779 list_move(&rt_se->run_list, queue);
781 list_move_tail(&rt_se->run_list, queue);
785 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
787 struct sched_rt_entity *rt_se = &p->rt;
790 for_each_sched_rt_entity(rt_se) {
791 rt_rq = rt_rq_of_se(rt_se);
792 requeue_rt_entity(rt_rq, rt_se, head);
796 static void yield_task_rt(struct rq *rq)
798 requeue_task_rt(rq, rq->curr, 0);
802 static int find_lowest_rq(struct task_struct *task);
804 static int select_task_rq_rt(struct task_struct *p, int sync)
806 struct rq *rq = task_rq(p);
809 * If the current task is an RT task, then
810 * try to see if we can wake this RT task up on another
811 * runqueue. Otherwise simply start this RT task
812 * on its current runqueue.
814 * We want to avoid overloading runqueues, even if
815 * the woken RT task is of higher priority than the current RT task.
816 * RT tasks behave differently than other tasks. If
817 * one gets preempted, we try to push it off to another queue.
818 * So trying to keep a preempting RT task on the same
819 * cache hot CPU will force the running RT task to
820 * a cold CPU. So we waste all the cache for the lower
821 * RT task in hopes of saving cache for an RT task
822 * that is just being woken and probably will have cache-hot data.
825 if (unlikely(rt_task(rq->curr)) &&
826 (p->rt.nr_cpus_allowed > 1)) {
827 int cpu = find_lowest_rq(p);
829 return (cpu == -1) ? task_cpu(p) : cpu;
833 * Otherwise, just let it ride on the affined RQ and the
834 * post-schedule router will push the preempted task away
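/*
 * On an equal-priority wakeup, prefer to keep the less migratable task on
 * this runqueue: if cpupri says some other CPU could run current while none
 * could take p, requeue p at the head and reschedule so the push logic can
 * move current away.
 */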
839 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
843 if (rq->curr->rt.nr_cpus_allowed == 1)
846 if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
849 if (p->rt.nr_cpus_allowed != 1
850 && cpupri_find(&rq->rd->cpupri, p, mask))
853 if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
857 * There appear to be other CPUs that can accept
858 * current and none to run 'p', so let's reschedule
859 * to try and push current away:
861 requeue_task_rt(rq, p, 1);
862 resched_task(rq->curr);
864 free_cpumask_var(mask);
867 #endif /* CONFIG_SMP */
870 * Preempt the current task with a newly woken task if needed:
872 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
874 if (p->prio < rq->curr->prio) {
875 resched_task(rq->curr);
883 * If the newly woken task is of equal priority to the current task,
884 * the newly woken task is non-migratable while current is migratable,
885 * and current will be preempted on the next reschedule,
887 * we should check to see if current can readily move to a different
888 * cpu. If so, we will reschedule to allow the push logic to try
889 * to move current somewhere else, making room for our non-migratable task.
892 if (p->prio == rq->curr->prio && !need_resched())
893 check_preempt_equal_prio(rq, p);
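/*
 * Task selection: find the first set bit in the priority bitmap and take the
 * head entity of that list; pick_next_task_rt() repeats this through group
 * entities (if any) until it reaches an actual task.
 */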
897 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
900 struct rt_prio_array *array = &rt_rq->active;
901 struct sched_rt_entity *next = NULL;
902 struct list_head *queue;
905 idx = sched_find_first_bit(array->bitmap);
906 BUG_ON(idx >= MAX_RT_PRIO);
908 queue = array->queue + idx;
909 next = list_entry(queue->next, struct sched_rt_entity, run_list);
914 static struct task_struct *pick_next_task_rt(struct rq *rq)
916 struct sched_rt_entity *rt_se;
917 struct task_struct *p;
922 if (unlikely(!rt_rq->rt_nr_running))
925 if (rt_rq_throttled(rt_rq))
929 rt_se = pick_next_rt_entity(rq, rt_rq);
931 rt_rq = group_rt_rq(rt_se);
934 p = rt_task_of(rt_se);
935 p->se.exec_start = rq->clock;
939 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
942 p->se.exec_start = 0;
947 /* Only try algorithms three times */
948 #define RT_MAX_TRIES 3
950 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
952 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
954 if (!task_running(rq, p) &&
955 (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
956 (p->rt.nr_cpus_allowed > 1))
961 /* Return the second highest RT task, NULL otherwise */
962 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
964 struct task_struct *next = NULL;
965 struct sched_rt_entity *rt_se;
966 struct rt_prio_array *array;
970 for_each_leaf_rt_rq(rt_rq, rq) {
971 array = &rt_rq->active;
972 idx = sched_find_first_bit(array->bitmap);
974 if (idx >= MAX_RT_PRIO)
976 if (next && next->prio < idx)
978 list_for_each_entry(rt_se, array->queue + idx, run_list) {
979 struct task_struct *p = rt_task_of(rt_se);
980 if (pick_rt_task(rq, p, cpu)) {
986 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
994 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
996 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
1000 /* "this_cpu" is cheaper to preempt than a remote processor */
1001 if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
1004 first = first_cpu(*mask);
1005 if (first != NR_CPUS)
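/*
 * find_lowest_rq() asks cpupri for the set of CPUs currently running at a
 * lower priority than 'task', then picks a cache-friendly candidate: the
 * task's previous CPU if it is in the set, otherwise a CPU from the smallest
 * SD_WAKE_AFFINE sched_domain that intersects the set, preferring this_cpu.
 */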
1011 static int find_lowest_rq(struct task_struct *task)
1013 struct sched_domain *sd;
1014 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1015 int this_cpu = smp_processor_id();
1016 int cpu = task_cpu(task);
1018 if (task->rt.nr_cpus_allowed == 1)
1019 return -1; /* No other targets possible */
1021 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1022 return -1; /* No targets found */
1025 * Only consider CPUs that are usable for migration.
1026 * I guess we might want to change cpupri_find() to ignore those
1027 * in the first place.
1029 cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
1032 * At this point we have built a mask of cpus representing the
1033 * lowest priority tasks in the system. Now we want to elect
1034 * the best one based on our affinity and topology.
1036 * We prioritize the last cpu that the task executed on since
1037 * it is most likely cache-hot in that location.
1039 if (cpumask_test_cpu(cpu, lowest_mask))
1043 * Otherwise, we consult the sched_domains span maps to figure
1044 * out which cpu is logically closest to our hot cache data.
1046 if (this_cpu == cpu)
1047 this_cpu = -1; /* Skip this_cpu opt if the same */
1049 for_each_domain(cpu, sd) {
1050 if (sd->flags & SD_WAKE_AFFINE) {
1051 cpumask_t domain_mask;
1054 cpumask_and(&domain_mask, sched_domain_span(sd),
1057 best_cpu = pick_optimal_cpu(this_cpu,
1065 * And finally, if there were no matches within the domains
1066 * just give the caller *something* to work with from the compatible
1069 return pick_optimal_cpu(this_cpu, lowest_mask);
1072 /* Will lock the rq it finds */
1073 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1075 struct rq *lowest_rq = NULL;
1079 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1080 cpu = find_lowest_rq(task);
1082 if ((cpu == -1) || (cpu == rq->cpu))
1085 lowest_rq = cpu_rq(cpu);
1087 /* if the prio of this runqueue changed, try again */
1088 if (double_lock_balance(rq, lowest_rq)) {
1090 * We had to unlock the run queue. In
1091 * the meantime, the task could have
1092 * migrated already or had its affinity changed.
1093 * Also make sure that it wasn't scheduled on its rq.
1095 if (unlikely(task_rq(task) != rq ||
1096 !cpumask_test_cpu(lowest_rq->cpu,
1097 &task->cpus_allowed) ||
1098 task_running(rq, task) ||
1101 spin_unlock(&lowest_rq->lock);
1107 /* If this rq is still suitable use it. */
1108 if (lowest_rq->rt.highest_prio.curr > task->prio)
1112 double_unlock_balance(rq, lowest_rq);
1120 * If the current CPU has more than one RT task, see if the
1121 * non-running task can migrate over to a CPU that is running a task
1122 * of lesser priority.
1124 static int push_rt_task(struct rq *rq)
1126 struct task_struct *next_task;
1127 struct rq *lowest_rq;
1129 int paranoid = RT_MAX_TRIES;
1131 if (!rq->rt.overloaded)
1134 next_task = pick_next_highest_task_rt(rq, -1);
1139 if (unlikely(next_task == rq->curr)) {
1145 * It's possible that the next_task slipped in with a
1146 * higher priority than current. If that's the case,
1147 * just reschedule current.
1149 if (unlikely(next_task->prio < rq->curr->prio)) {
1150 resched_task(rq->curr);
1154 /* We might release rq lock */
1155 get_task_struct(next_task);
1157 /* find_lock_lowest_rq locks the rq if found */
1158 lowest_rq = find_lock_lowest_rq(next_task, rq);
1160 struct task_struct *task;
1162 * find_lock_lowest_rq releases rq->lock,
1163 * so it is possible that next_task has changed.
1164 * If it has, then try again.
1166 task = pick_next_highest_task_rt(rq, -1);
1167 if (unlikely(task != next_task) && task && paranoid--) {
1168 put_task_struct(next_task);
1175 deactivate_task(rq, next_task, 0);
1176 set_task_cpu(next_task, lowest_rq->cpu);
1177 activate_task(lowest_rq, next_task, 0);
1179 resched_task(lowest_rq->curr);
1181 double_unlock_balance(rq, lowest_rq);
1185 put_task_struct(next_task);
1191 * TODO: Currently we just use the second highest prio task on
1192 * the queue, and stop when it can't migrate (or there are
1193 * no more RT tasks). There may be a case where a lower
1194 * priority RT task has a different affinity than the
1195 * higher RT task. In this case the lower RT task could
1196 * possibly be able to migrate whereas the higher priority
1197 * RT task could not. We currently ignore this issue.
1198 * Enhancements are welcome!
1200 static void push_rt_tasks(struct rq *rq)
1202 /* push_rt_task will return true if it moved an RT */
1203 while (push_rt_task(rq))
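/*
 * pull_rt_task() is the mirror of push: when this runqueue is about to run
 * something of lower priority, walk the overloaded runqueues in rto_mask and
 * steal their next-highest RT task if it would preempt what we would run.
 */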
1207 static int pull_rt_task(struct rq *this_rq)
1209 int this_cpu = this_rq->cpu, ret = 0, cpu;
1210 struct task_struct *p;
1213 if (likely(!rt_overloaded(this_rq)))
1216 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1217 if (this_cpu == cpu)
1220 src_rq = cpu_rq(cpu);
1223 * Don't bother taking the src_rq->lock if the next highest
1224 * task is known to be lower-priority than our current task.
1225 * This may look racy, but if this value is about to go
1226 * logically higher, the src_rq will push this task away.
1227 * And if it's going logically lower, we do not care.
1229 if (src_rq->rt.highest_prio.next >=
1230 this_rq->rt.highest_prio.curr)
1234 * We can potentially drop this_rq's lock in
1235 * double_lock_balance, and another CPU could alter this_rq.
1238 double_lock_balance(this_rq, src_rq);
1241 * Are there still pullable RT tasks?
1243 if (src_rq->rt.rt_nr_running <= 1)
1246 p = pick_next_highest_task_rt(src_rq, this_cpu);
1249 * Do we have an RT task that preempts
1250 * the to-be-scheduled task?
1252 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1253 WARN_ON(p == src_rq->curr);
1254 WARN_ON(!p->se.on_rq);
1257 * There's a chance that p is higher in priority
1258 * than what's currently running on its cpu.
1259 * This is just that p is waking up and hasn't
1260 * had a chance to schedule. We only pull
1261 * p if it is lower in priority than the
1262 * current task on the run queue.
1264 if (p->prio < src_rq->curr->prio)
1269 deactivate_task(src_rq, p, 0);
1270 set_task_cpu(p, this_cpu);
1271 activate_task(this_rq, p, 0);
1273 * We continue with the search, just in
1274 * case there's an even higher prio task
1275 * in another runqueue. (low likelihood but possible)
1280 double_unlock_balance(this_rq, src_rq);
1286 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1288 /* Try to pull RT tasks here if we lower this rq's prio */
1289 if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
1293 static void post_schedule_rt(struct rq *rq)
1296 * If we have more than one rt_task queued, then
1297 * see if we can push the other rt_tasks off to other CPUs.
1298 * Note we may release the rq lock, and since
1299 * the lock was owned by prev, we need to release it
1300 * first via finish_lock_switch and then reacquire it here.
1302 if (unlikely(rq->rt.overloaded)) {
1303 spin_lock_irq(&rq->lock);
1305 spin_unlock_irq(&rq->lock);
1310 * If we are not running and we are not going to reschedule soon, we should
1311 * try to push tasks away now
1313 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1315 if (!task_running(rq, p) &&
1316 !test_tsk_need_resched(rq->curr) &&
1317 rq->rt.overloaded &&
1318 p->rt.nr_cpus_allowed > 1)
1322 static unsigned long
1323 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1324 unsigned long max_load_move,
1325 struct sched_domain *sd, enum cpu_idle_type idle,
1326 int *all_pinned, int *this_best_prio)
1328 /* don't touch RT tasks */
1333 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1334 struct sched_domain *sd, enum cpu_idle_type idle)
1336 /* don't touch RT tasks */
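/*
 * Affinity updates must keep rt_nr_migratory and the overload state
 * consistent when a queued RT task's cpumask crosses the one-CPU boundary
 * in either direction.
 */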
1340 static void set_cpus_allowed_rt(struct task_struct *p,
1341 const struct cpumask *new_mask)
1343 int weight = cpumask_weight(new_mask);
1345 BUG_ON(!rt_task(p));
1348 * Update the migration status of the RQ if we have an RT task
1349 * which is running AND changing its weight value.
1351 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1352 struct rq *rq = task_rq(p);
1354 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1355 rq->rt.rt_nr_migratory++;
1356 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1357 BUG_ON(!rq->rt.rt_nr_migratory);
1358 rq->rt.rt_nr_migratory--;
1361 update_rt_migration(rq);
1364 cpumask_copy(&p->cpus_allowed, new_mask);
1365 p->rt.nr_cpus_allowed = weight;
1368 /* Assumes rq->lock is held */
1369 static void rq_online_rt(struct rq *rq)
1371 if (rq->rt.overloaded)
1372 rt_set_overload(rq);
1374 __enable_runtime(rq);
1376 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1379 /* Assumes rq->lock is held */
1380 static void rq_offline_rt(struct rq *rq)
1382 if (rq->rt.overloaded)
1383 rt_clear_overload(rq);
1385 __disable_runtime(rq);
1387 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1391 * When switching from the RT queue, we bring ourselves to a position
1392 * where we might want to pull RT tasks from other runqueues.
1394 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1398 * If there are other RT tasks then we will reschedule
1399 * and the scheduling of the other RT tasks will handle
1400 * the balancing. But if we are the last RT task
1401 * we may need to handle the pulling of RT tasks now.
1404 if (!rq->rt.rt_nr_running)
1408 static inline void init_sched_rt_class(void)
1412 for_each_possible_cpu(i)
1413 alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL);
1415 #endif /* CONFIG_SMP */
1418 * When switching a task to RT, we may overload the runqueue
1419 * with RT tasks. In this case we try to push them off to other runqueues.
1422 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1425 int check_resched = 1;
1428 * If we are already running, then there's nothing
1429 * that needs to be done. But if we are not running
1430 * we may need to preempt the current running task.
1431 * If that current running task is also an RT task
1432 * then see if we can move to another run queue.
1436 if (rq->rt.overloaded && push_rt_task(rq) &&
1437 /* Don't resched if we changed runqueues */
1440 #endif /* CONFIG_SMP */
1441 if (check_resched && p->prio < rq->curr->prio)
1442 resched_task(rq->curr);
1447 * Priority of the task has changed. This may cause
1448 * us to initiate a push or pull.
1450 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1451 int oldprio, int running)
1456 * If our priority decreases while running, we
1457 * may need to pull tasks to this runqueue.
1459 if (oldprio < p->prio)
1462 * If there's a higher priority task waiting to run
1463 * then reschedule. Note, the above pull_rt_task
1464 * can release the rq lock and p could migrate.
1465 * Only reschedule if p is still on the same runqueue.
1467 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1470 /* For UP simply resched on drop of prio */
1471 if (oldprio < p->prio)
1473 #endif /* CONFIG_SMP */
1476 * This task is not running, but if its priority is
1477 * higher than the current running task's, then reschedule.
1480 if (p->prio < rq->curr->prio)
1481 resched_task(rq->curr);
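/*
 * RLIMIT_RTTIME watchdog: posix-cpu-timers will send SIGXCPU (and eventually
 * SIGKILL) once p->rt.timeout exceeds the task's soft/hard limits.
 *
 * Illustrative userspace sketch (not from this file): a task can cap its own
 * RT CPU time, in microseconds, so this watchdog fires if it spins too long:
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *	setrlimit(RLIMIT_RTTIME, &rl);
 */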
1485 static void watchdog(struct rq *rq, struct task_struct *p)
1487 unsigned long soft, hard;
1492 soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1493 hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1495 if (soft != RLIM_INFINITY) {
1499 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1500 if (p->rt.timeout > next)
1501 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1505 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1512 * RR tasks need a special form of timeslice management.
1513 * FIFO tasks have no timeslices.
1515 if (p->policy != SCHED_RR)
1518 if (--p->rt.time_slice)
1521 p->rt.time_slice = DEF_TIMESLICE;
1524 * Requeue to the end of queue if we are not the only element on the queue:
1527 if (p->rt.run_list.prev != p->rt.run_list.next) {
1528 requeue_task_rt(rq, p, 0);
1529 set_tsk_need_resched(p);
1533 static void set_curr_task_rt(struct rq *rq)
1535 struct task_struct *p = rq->curr;
1537 p->se.exec_start = rq->clock;
1540 static const struct sched_class rt_sched_class = {
1541 .next = &fair_sched_class,
1542 .enqueue_task = enqueue_task_rt,
1543 .dequeue_task = dequeue_task_rt,
1544 .yield_task = yield_task_rt,
1546 .check_preempt_curr = check_preempt_curr_rt,
1548 .pick_next_task = pick_next_task_rt,
1549 .put_prev_task = put_prev_task_rt,
1552 .select_task_rq = select_task_rq_rt,
1554 .load_balance = load_balance_rt,
1555 .move_one_task = move_one_task_rt,
1556 .set_cpus_allowed = set_cpus_allowed_rt,
1557 .rq_online = rq_online_rt,
1558 .rq_offline = rq_offline_rt,
1559 .pre_schedule = pre_schedule_rt,
1560 .post_schedule = post_schedule_rt,
1561 .task_wake_up = task_wake_up_rt,
1562 .switched_from = switched_from_rt,
1565 .set_curr_task = set_curr_task_rt,
1566 .task_tick = task_tick_rt,
1568 .prio_changed = prio_changed_rt,
1569 .switched_to = switched_to_rt,
1572 #ifdef CONFIG_SCHED_DEBUG
1573 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1575 static void print_rt_stats(struct seq_file *m, int cpu)
1577 struct rt_rq *rt_rq;
1580 for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1581 print_rt_rq(m, cpu, rt_rq);
1584 #endif /* CONFIG_SCHED_DEBUG */