/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
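/*
 * For context: tasks are placed into this class from userspace via the
 * POSIX scheduling API, not by anything defined in this file. A minimal
 * sketch (userspace, illustrative -- the priority value is made up):
 *
 *	#include <sched.h>
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *	if (sched_setscheduler(0, SCHED_FIFO, &param) == -1)
 *		perror("sched_setscheduler");
 */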
#ifdef CONFIG_RT_GROUP_SCHED

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}
#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
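/*
 * Overload bookkeeping below: a runqueue counts as "overloaded" once it
 * has more than one RT task queued and at least one of them is allowed
 * to run on another CPU, i.e. there is something other CPUs could
 * usefully pull.
 */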
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}
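/*
 * The pushable list below is a priority-ordered plist. Re-initializing
 * the node with the task's current prio before re-adding keeps the list
 * sorted even if the task's priority changed while it was queued.
 */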
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
}
#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}
#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}
#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}
#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */
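/*
 * In the !RT_GROUP_SCHED case all of the above reduces to the single
 * global bandwidth pair exposed through the sysctls
 * /proc/sys/kernel/sched_rt_period_us and
 * /proc/sys/kernel/sched_rt_runtime_us (by default 1000000 and 950000,
 * i.e. RT tasks may consume at most 95% of each second). Illustrative
 * shell one-liner:
 *
 *	echo 980000 > /proc/sys/kernel/sched_rt_runtime_us
 */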
#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		spin_unlock(&iter->rt_runtime_lock);
	}
	spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}
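/*
 * Worked example for the loop above (made-up numbers): with a 1000ms
 * period on a 4-CPU root domain (weight == 4), a neighbour that has
 * 400ms of its runtime left unused donates 400/4 = 100ms of it to us,
 * clamped so that our rt_runtime never exceeds the full period.
 */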
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		spin_lock(&rt_b->rt_runtime_lock);
		spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		spin_unlock(&rt_rq->rt_runtime_lock);
		spin_unlock(&rt_b->rt_runtime_lock);
	}
}
static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		spin_lock(&rt_b->rt_runtime_lock);
		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		spin_unlock(&rt_rq->rt_runtime_lock);
		spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	spin_unlock_irqrestore(&rq->lock, flags);
}
static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running)
			idle = 0;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		spin_unlock(&rq->lock);
	}

	return idle;
}
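/*
 * In other words: every rt_period the accumulated rt_time is reduced by
 * up to overrun*runtime, and a throttled rt_rq whose rt_time has fallen
 * back below its runtime budget is unthrottled and re-enqueued here.
 */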
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}
#if defined CONFIG_SMP

static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);

static inline int next_prio(struct rq *rq)
{
	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);

	if (next && rt_prio(next->prio))
		return next->prio;
	else
		return MAX_RT_PRIO;
}

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (prio < prev_prio) {

		/*
		 * If the new task is higher in priority than anything on the
		 * run-queue, we know that the previous high becomes our
		 * next-highest.
		 */
		rt_rq->highest_prio.next = prev_prio;

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

	} else if (prio == rt_rq->highest_prio.curr)
		/*
		 * If the next task is equal in priority to the highest on
		 * the run-queue, then we implicitly know that the next highest
		 * task cannot be any lower than current
		 */
		rt_rq->highest_prio.next = prio;
	else if (prio < rt_rq->highest_prio.next)
		/*
		 * Otherwise, we need to recompute next-highest
		 */
		rt_rq->highest_prio.next = next_prio(rq);
}
static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
		rt_rq->highest_prio.next = next_prio(rq);

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED

static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}
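/*
 * The enqueue/dequeue primitives below operate on the classic O(1)
 * rt_prio_array: one list head per priority level plus a bitmap, so
 * sched_find_first_bit() yields the highest queued priority in
 * constant time.
 */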
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}
/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se);
	}
}
/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);

	inc_cpu_load(rq, p->se.load.weight);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);

	dec_cpu_load(rq, p->se.load.weight);
}
/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}
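/*
 * Note that sched_yield() from an RT task lands here with head == 0:
 * the task is moved to the tail of its own priority list, so it only
 * ever yields to tasks of equal priority.
 */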
static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. Even if
	 * the RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some of a RT task
	 * that is just being woken and probably will have
	 * cache hot data.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 */
	return task_cpu(p);
}
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appears to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}
#endif /* CONFIG_SMP */
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !need_resched())
		check_preempt_equal_prio(rq, p);
#endif
}
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;

	return p;
}
static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);

	return p;
}
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}
/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}
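/*
 * Scratch mask for find_lowest_rq(); kept per-CPU so that, with large
 * NR_CPUS, we neither place a big cpumask on the stack nor allocate one
 * on every invocation.
 */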
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static inline int pick_optimal_cpu(int this_cpu,
				   const struct cpumask *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
		return this_cpu;

	first = cpumask_first(mask);
	if (first < nr_cpu_ids)
		return first;

	return -1;
}
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);
	cpumask_var_t domain_mask;

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
		for_each_domain(cpu, sd) {
			if (sd->flags & SD_WAKE_AFFINE) {
				int best_cpu;

				cpumask_and(domain_mask,
					    sched_domain_span(sd),
					    lowest_mask);

				best_cpu = pick_optimal_cpu(this_cpu,
							    domain_mask);

				if (best_cpu != -1) {
					free_cpumask_var(domain_mask);
					return best_cpu;
				}
			}
		}
		free_cpumask_var(domain_mask);
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->rt.nr_cpus_allowed <= 1);

	BUG_ON(!p->se.on_rq);
	BUG_ON(!rt_task(p));

	return p;
}
/*
 * If the current CPU has more than one RT task, see if the
 * non-running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in at a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * If we get here, the task hasn't moved at all, but
			 * it has failed to push. We will not try again,
			 * since the other cpus will pull from us when they
			 * are ready.
			 */
			dequeue_pushable_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return 1;
}
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT task */
	while (push_rt_task(rq))
		;
}
static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq.
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue.
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	push_rt_tasks(rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now.
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->rt.nr_cpus_allowed > 1)
		push_rt_tasks(rq);
}
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}
static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	int weight = cpumask_weight(new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if (!task_current(rq, p)) {
			/*
			 * Make sure we dequeue this task from the pushable list
			 * before going further. It will either remain off of
			 * the list because we are no longer pushable, or it
			 * will be requeued.
			 */
			if (p->rt.nr_cpus_allowed > 1)
				dequeue_pushable_task(rq, p);

			/*
			 * Requeue if our weight is changing and still > 1
			 */
			if (weight > 1)
				enqueue_pushable_task(rq, p);
		}

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(&rq->rt);
	}

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->rt.nr_cpus_allowed = weight;
}
/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}
/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}

static inline void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
}
#endif /* CONFIG_SMP */
/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}
/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}
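/*
 * The watchdog above only fires for tasks that opted in through
 * RLIMIT_RTTIME, a limit in microseconds of RT CPU time consumed
 * without blocking (the timeout resets on wakeup, see enqueue_task_rt).
 * A userspace sketch, with made-up limits:
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *	setrlimit(RLIMIT_RTTIME, &rl);
 */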
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p, 0);
		set_tsk_need_resched(p);
	}
}
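/*
 * DEF_TIMESLICE above is the SCHED_RR quantum; userspace can query it
 * for a given thread (illustrative, userspace):
 *
 *	struct timespec ts;
 *	sched_rr_get_interval(0, &ts);
 */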
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}
static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,

	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};
#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

static void print_rt_stats(struct seq_file *m, int cpu)
{
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */