/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
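/*
 * Illustrative userspace sketch (not part of this file, kept under "#if 0"):
 * a task opts into this scheduling class from userspace with
 * sched_setscheduler() and one of the two policies named above.  The
 * priority value and the need for CAP_SYS_NICE are assumptions about a
 * typical configuration.
 */
#if 0
#include <sched.h>
#include <stdio.h>

int main(void)
{
        struct sched_param param = { .sched_priority = 50 };

        /* Make the calling thread a FIFO real-time task. */
        if (sched_setscheduler(0, SCHED_FIFO, &param) == -1) {
                perror("sched_setscheduler");
                return 1;
        }
        return 0;
}
#endif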
static inline int rt_overloaded(struct rq *rq)
        return atomic_read(&rq->rd->rto_count);

static inline void rt_set_overload(struct rq *rq)
        cpu_set(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        atomic_inc(&rq->rd->rto_count);

static inline void rt_clear_overload(struct rq *rq)
        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpu_clear(rq->cpu, rq->rd->rto_mask);
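/*
 * Illustrative sketch (not in the original file): the consumer side that the
 * ordering comment in rt_set_overload() is about.  Pullers check rto_count
 * first and only then walk rto_mask, so the mask update has to be visible
 * before the count is raised; otherwise a reader could observe a non-zero
 * count together with a stale, empty mask.  The helper name is made up for
 * illustration.
 */
static inline int rto_example_reader(struct rq *this_rq)
{
        int cpu;

        if (!rt_overloaded(this_rq))    /* reads rto_count first */
                return 0;

        for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {  /* then the mask */
                if (cpu != this_rq->cpu)
                        return 1;
        }
        return 0;
}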
static void update_rt_migration(struct rq *rq)
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
                if (!rq->rt.overloaded) {
                        rq->rt.overloaded = 1;
        } else if (rq->rt.overloaded) {
                rt_clear_overload(rq);
                rq->rt.overloaded = 0;

#endif /* CONFIG_SMP */
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
        return container_of(rt_se, struct task_struct, rt);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
        return !list_empty(&rt_se->run_list);

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
        return rt_rq->rt_runtime;

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);

#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
                struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

                enqueue_rt_entity(rt_se);
                if (rt_rq->highest_prio < curr->prio)

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;

static int rt_se_boosted(struct sched_rt_entity *rt_se)
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
static inline cpumask_t sched_rt_period_mask(void)
        return cpu_rq(smp_processor_id())->rd->span;

static inline cpumask_t sched_rt_period_mask(void)
        return cpu_online_map;

struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
        return &rt_rq->tg->rt_bandwidth;

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
        return rt_rq->rt_runtime;

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
        return ktime_to_ns(def_rt_bandwidth.rt_period);

#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
        return container_of(rt_rq, struct rq, rt);

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
        return rt_rq->rt_throttled;

static inline cpumask_t sched_rt_period_mask(void)
        return cpu_online_map;

struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
        return &cpu_rq(cpu)->rt;

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
        return &def_rt_bandwidth;
static int do_balance_runtime(struct rt_rq *rt_rq);

static int balance_runtime(struct rt_rq *rt_rq)
        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
                spin_lock(&rt_rq->rt_runtime_lock);

static inline int balance_runtime(struct rt_rq *rt_rq)

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
        if (rt_b->rt_runtime == RUNTIME_INF)

        span = sched_rt_period_mask();
        for_each_cpu_mask(i, span) {
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                        spin_unlock(&rt_rq->rt_runtime_lock);

                        sched_rt_rq_enqueue(rt_rq);
                spin_unlock(&rq->lock);
static int do_balance_runtime(struct rt_rq *rt_rq)
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
        int i, weight, more = 0;

        weight = cpus_weight(rd->span);

        spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu_mask(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);

                spin_lock(&iter->rt_runtime_lock);
                if (iter->rt_runtime == RUNTIME_INF)

                diff = iter->rt_runtime - iter->rt_time;
                        do_div(diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;

                        if (rt_rq->rt_runtime == rt_period) {
                                spin_unlock(&iter->rt_runtime_lock);

                spin_unlock(&iter->rt_runtime_lock);
        spin_unlock(&rt_b->rt_runtime_lock);
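/*
 * Worked example for do_balance_runtime() (illustrative numbers, not from
 * the original source): with a 4-CPU root domain, rt_period = 1000ms and
 * rt_runtime = 950ms per rt_rq, suppose this rt_rq is throttled while a
 * neighbour has only consumed 100ms of its runtime.  Then
 * diff = 950ms - 100ms = 850ms, do_div(diff, weight) with weight = 4 yields
 * roughly 212ms, and that much runtime is moved from the neighbour to the
 * throttled rt_rq, clamped so its total never exceeds rt_period.
 */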
static void __disable_runtime(struct rq *rq)
        struct root_domain *rd = rq->rd;

        if (unlikely(!scheduler_running))

        for_each_leaf_rt_rq(rt_rq, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                spin_lock(&rt_b->rt_runtime_lock);
                spin_lock(&rt_rq->rt_runtime_lock);
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                    rt_rq->rt_runtime == rt_b->rt_runtime)
                spin_unlock(&rt_rq->rt_runtime_lock);

                want = rt_b->rt_runtime - rt_rq->rt_runtime;

                for_each_cpu_mask(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);

                        spin_lock(&iter->rt_runtime_lock);
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
                                iter->rt_runtime -= want;
                        spin_unlock(&iter->rt_runtime_lock);

                spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = RUNTIME_INF;
                spin_unlock(&rt_rq->rt_runtime_lock);
                spin_unlock(&rt_b->rt_runtime_lock);

static void disable_runtime(struct rq *rq)
        spin_lock_irqsave(&rq->lock, flags);
        __disable_runtime(rq);
        spin_unlock_irqrestore(&rq->lock, flags);

static void __enable_runtime(struct rq *rq)
        struct root_domain *rd = rq->rd;

        if (unlikely(!scheduler_running))

        for_each_leaf_rt_rq(rt_rq, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                spin_lock(&rt_b->rt_runtime_lock);
                spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                spin_unlock(&rt_rq->rt_runtime_lock);
                spin_unlock(&rt_b->rt_runtime_lock);

static void enable_runtime(struct rq *rq)
        spin_lock_irqsave(&rq->lock, flags);
        __enable_runtime(rq);
        spin_unlock_irqrestore(&rq->lock, flags);
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

                return rt_rq->highest_prio;

        return rt_task_of(rt_se)->prio;

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
        u64 runtime = sched_rt_runtime(rt_rq);

        if (runtime == RUNTIME_INF)

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))

        balance_runtime(rt_rq);
        runtime = sched_rt_runtime(rt_rq);
        if (runtime == RUNTIME_INF)

        if (rt_rq->rt_time > runtime) {
                rt_rq->rt_throttled = 1;
                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
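/*
 * Illustrative userspace sketch (not part of this file, kept under "#if 0"):
 * the global bandwidth enforced by sched_rt_runtime()/sched_rt_period() is
 * configured via /proc/sys/kernel/sched_rt_runtime_us and
 * /proc/sys/kernel/sched_rt_period_us (typically 950000us out of 1000000us;
 * -1 disables throttling).
 */
#if 0
#include <stdio.h>

int main(void)
{
        long runtime_us = 0, period_us = 0;
        FILE *f;

        f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "r");
        if (f) {
                if (fscanf(f, "%ld", &runtime_us) == 1)
                        printf("rt runtime: %ld us\n", runtime_us);
                fclose(f);
        }

        f = fopen("/proc/sys/kernel/sched_rt_period_us", "r");
        if (f) {
                if (fscanf(f, "%ld", &period_us) == 1)
                        printf("rt period:  %ld us\n", period_us);
                fclose(f);
        }
        return 0;
}
#endif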
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

        if (!task_has_rt_policy(curr))

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);

                spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_time += delta_exec;
                if (sched_rt_runtime_exceeded(rt_rq))
                spin_unlock(&rt_rq->rt_runtime_lock);
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
                struct rq *rq = rq_of_rt_rq(rt_rq);

                rt_rq->highest_prio = rt_se_prio(rt_se);

                        cpupri_set(&rq->rd->cpupri, rq->cpu,

        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);

                rq->rt.rt_nr_migratory++;

        update_rt_migration(rq_of_rt_rq(rt_rq));
#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);

        start_rt_bandwidth(&def_rt_bandwidth);

void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        int highest_prio = rt_rq->highest_prio;

        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_rq->rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
                if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
                        array = &rt_rq->active;
                        rt_rq->highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rt_rq->highest_prio alone */

                rt_rq->highest_prio = MAX_RT_PRIO;

        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
                rq->rt.rt_nr_migratory--;

        if (rt_rq->highest_prio != highest_prio) {
                struct rq *rq = rq_of_rt_rq(rt_rq);

                        cpupri_set(&rq->rd->cpupri, rq->cpu,
                                   rt_rq->highest_prio);

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        /*
         * Don't enqueue the group if it's throttled, or when empty.
         * The latter is a consequence of the former when a child group
         * gets throttled and the current group doesn't have any other
         * tasks.
         */
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))

        if (rt_se->nr_cpus_allowed == 1)
                list_add(&rt_se->run_list, queue);

                list_add_tail(&rt_se->run_list, queue);

        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
        struct sched_rt_entity *back = NULL;

        for_each_sched_rt_entity(rt_se) {

        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                __dequeue_rt_entity(rt_se);

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se);

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
        dequeue_rt_stack(rt_se);

        for_each_sched_rt_entity(rt_se) {
                struct rt_rq *rt_rq = group_rt_rq(rt_se);

                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se);
/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
        struct sched_rt_entity *rt_se = &p->rt;

        enqueue_rt_entity(rt_se);

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
        struct sched_rt_entity *rt_se = &p->rt;

        dequeue_rt_entity(rt_se);

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
        struct rt_prio_array *array = &rt_rq->active;
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        if (on_rt_rq(rt_se)) {
                list_del_init(&rt_se->run_list);
                list_add_tail(&rt_se->run_list,
                              array->queue + rt_se_prio(rt_se));

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
        struct sched_rt_entity *rt_se = &p->rt;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se);

static void yield_task_rt(struct rq *rq)
        requeue_task_rt(rq, rq->curr);
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
        struct rq *rq = task_rq(p);

        /*
         * If the current task is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues, even if
         * the RT task is of higher priority than the current RT task.
         * RT tasks behave differently than other tasks. If
         * one gets preempted, we try to push it off to another queue.
         * So trying to keep a preempting RT task on the same
         * cache hot CPU will force the running RT task to
         * a cold CPU. So we waste all the cache for the lower
         * RT task in hopes of saving some of an RT task
         * that is just being woken and probably will have
         * cold cache anyway.
         */
        if (unlikely(rt_task(rq->curr)) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int cpu = find_lowest_rq(p);

                return (cpu == -1) ? task_cpu(p) : cpu;

        /*
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away
         */
#endif /* CONFIG_SMP */
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);

        /*
         * If:
         *
         * - the newly woken task is of equal priority to the current task
         * - the newly woken task is non-migratable while current is migratable
         * - current will be preempted on the next reschedule
         *
         * we should check to see if current can readily move to a different
         * cpu. If so, we will reschedule to allow the push logic to try
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
        if ((p->prio == rq->curr->prio)
            && p->rt.nr_cpus_allowed == 1
            && rq->curr->rt.nr_cpus_allowed != 1) {

                if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
                        /*
                         * There appear to be other cpus that can accept
                         * current, so let's reschedule to try and push it away
                         */
                        resched_task(rq->curr);
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

static struct task_struct *pick_next_task_rt(struct rq *rq)
        struct sched_rt_entity *rt_se;
        struct task_struct *p;

        if (unlikely(!rt_rq->rt_nr_running))

        if (rt_rq_throttled(rt_rq))

        rt_se = pick_next_rt_entity(rq, rt_rq);
        rt_rq = group_rt_rq(rt_se);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock;

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
        p->se.exec_start = 0;
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
        if (!task_running(rq, p) &&
            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
            (p->rt.nr_cpus_allowed > 1))

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
        struct task_struct *next = NULL;
        struct sched_rt_entity *rt_se;
        struct rt_prio_array *array;

        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
                if (idx >= MAX_RT_PRIO)
                if (next && next->prio < idx)
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
        /* "this_cpu" is cheaper to preempt than a remote processor */
        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))

        first = first_cpu(*mask);
        if (first != NR_CPUS)

static int find_lowest_rq(struct task_struct *task)
        struct sched_domain *sd;
        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu = task_cpu(task);

        if (task->rt.nr_cpus_allowed == 1)
                return -1; /* No other targets possible */

        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system. Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpu_isset(cpu, *lowest_mask))

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
                this_cpu = -1; /* Skip this_cpu opt if the same */

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        cpumask_t domain_mask;

                        cpus_and(domain_mask, sd->span, *lowest_mask);

                        best_cpu = pick_optimal_cpu(this_cpu,

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the
         * compatible locations.
         */
        return pick_optimal_cpu(this_cpu, lowest_mask);
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
        struct rq *lowest_rq = NULL;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the meantime, task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpu_isset(lowest_rq->cpu,
                                                task->cpus_allowed) ||
                                     task_running(rq, task) ||

                                spin_unlock(&lowest_rq->lock);

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio > task->prio)

                spin_unlock(&lowest_rq->lock);
/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser priority.
 */
static int push_rt_task(struct rq *rq)
        struct task_struct *next_task;
        struct rq *lowest_rq;

        int paranoid = RT_MAX_TRIES;

        if (!rq->rt.overloaded)

        next_task = pick_next_highest_task_rt(rq, -1);

        if (unlikely(next_task == rq->curr)) {

        /*
         * It's possible that next_task slipped in with a
         * higher priority than current. If that's the case,
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock
                 * so it is possible that next_task has changed.
                 * If it has, then try again.
                 */
                task = pick_next_highest_task_rt(rq, -1);
                if (unlikely(task != next_task) && task && paranoid--) {
                        put_task_struct(next_task);

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);

        resched_task(lowest_rq->curr);

        spin_unlock(&lowest_rq->lock);

        put_task_struct(next_task);
/*
 * TODO: Currently we just use the second highest prio task on
 * the queue, and stop when it can't migrate (or there are no
 * more RT tasks). There may be a case where a lower
 * priority RT task has a different affinity than the
 * higher RT task. In this case the lower RT task could
 * possibly be able to migrate whereas the higher priority
 * RT task could not. We currently ignore this issue.
 * Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
        /* push_rt_task will return true if it moved an RT */
        while (push_rt_task(rq))
static int pull_rt_task(struct rq *this_rq)
        int this_cpu = this_rq->cpu, ret = 0, cpu;
        struct task_struct *p, *next;

        if (likely(!rt_overloaded(this_rq)))

        next = pick_next_task_rt(this_rq);

        for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)

                src_rq = cpu_rq(cpu);
                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * steal our next task - hence we must cause
                 * the caller to recalculate the next task
                 * in that case:
                 */
                if (double_lock_balance(this_rq, src_rq)) {
                        struct task_struct *old_next = next;

                        next = pick_next_task_rt(this_rq);
                        if (next != old_next)

                /*
                 * Are there still pullable RT tasks?
                 */
                if (src_rq->rt.rt_nr_running <= 1)

                p = pick_next_highest_task_rt(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (!next || (p->prio < next->prio))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);

                        /*
                         * There's a chance that p is higher in priority
                         * than what's currently running on its cpu.
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
                         * current task on the run queue or
                         * this_rq's next task is lower in prio than
                         * the current task on that rq.
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         *
                         * Update next so that we won't pick a task
                         * on another cpu with a priority lower (or equal)
                         * than the one we just picked.
                         */

                spin_unlock(&src_rq->lock);
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)

static void post_schedule_rt(struct rq *rq)
        /*
         * If we have more than one rt_task queued, then
         * see if we can push the other rt_tasks off to other CPUs.
         * Note we may release the rq lock, and since
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
        if (unlikely(rq->rt.overloaded)) {
                spin_lock_irq(&rq->lock);
                spin_unlock_irq(&rq->lock);

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now.
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
        /* don't touch RT tasks */

move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
        /* don't touch RT tasks */

static void set_cpus_allowed_rt(struct task_struct *p,
                                const cpumask_t *new_mask)
        int weight = cpus_weight(*new_mask);

        BUG_ON(!rt_task(p));

        /*
         * Update the migration status of the RQ if we have an RT task
         * which is running AND changing its weight value.
         */
        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);

                if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
                        rq->rt.rt_nr_migratory++;
                } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;

                update_rt_migration(rq);

        p->cpus_allowed = *new_mask;
        p->rt.nr_cpus_allowed = weight;
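/*
 * Illustrative userspace sketch (not part of this file, kept under "#if 0"):
 * set_cpus_allowed_rt() is the per-class hook invoked when a task's allowed
 * CPU mask changes, for example as a result of sched_setaffinity() from
 * userspace.  The CPU numbers below are illustrative.
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(0, &set);
        CPU_SET(1, &set);       /* allow CPUs 0 and 1 -> weight becomes 2 */

        if (sched_setaffinity(0, sizeof(set), &set) == -1) {
                perror("sched_setaffinity");
                return 1;
        }
        return 0;
}
#endif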
/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
        if (rq->rt.overloaded)
                rt_set_overload(rq);

        __enable_runtime(rq);

        cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
        if (rq->rt.overloaded)
                rt_clear_overload(rq);

        __disable_runtime(rq);

        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);

/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
        /*
         * If there are other RT tasks then we will reschedule
         * and the scheduling of the other RT tasks will handle
         * the balancing. But if we are the last RT task
         * we may need to handle the pulling of RT tasks
         * now.
         */
        if (!rq->rt.rt_nr_running)

#endif /* CONFIG_SMP */
/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
        int check_resched = 1;

        /*
         * If we are already running, then there's nothing
         * that needs to be done. But if we are not running
         * we may need to preempt the current running task.
         * If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
                if (rq->rt.overloaded && push_rt_task(rq) &&
                    /* Don't resched if we changed runqueues */
#endif /* CONFIG_SMP */
                if (check_resched && p->prio < rq->curr->prio)
                        resched_task(rq->curr);
/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                            int oldprio, int running)
                /*
                 * If our priority decreases while running, we
                 * may need to pull tasks to this runqueue.
                 */
                if (oldprio < p->prio)
                /*
                 * If there's a higher priority task waiting to run
                 * then reschedule. Note, the above pull_rt_task
                 * can release the rq lock and p could migrate.
                 * Only reschedule if p is still on the same runqueue.
                 */
                if (p->prio > rq->rt.highest_prio && rq->curr == p)

                /* For UP simply resched on drop of prio */
                if (oldprio < p->prio)
#endif /* CONFIG_SMP */
                /*
                 * This task is not running, but if it is
                 * greater than the current running task
                 * then reschedule.
                 */
                if (p->prio < rq->curr->prio)
                        resched_task(rq->curr);
static void watchdog(struct rq *rq, struct task_struct *p)
        unsigned long soft, hard;

        soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
        hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

        if (soft != RLIM_INFINITY) {

                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                if (p->rt.timeout > next)
                        p->it_sched_expires = p->se.sum_exec_runtime;
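/*
 * Illustrative userspace sketch (not part of this file, kept under "#if 0"):
 * the limit the watchdog checks is RLIMIT_RTTIME, expressed in microseconds
 * of CPU time consumed by a real-time task without sleeping.  The values
 * below are illustrative.
 */
#if 0
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;

        rl.rlim_cur = 500000;   /* soft limit: 0.5 s of RT CPU time */
        rl.rlim_max = 1000000;  /* hard limit: 1 s */

        if (setrlimit(RLIMIT_RTTIME, &rl) == -1) {
                perror("setrlimit");
                return 1;
        }
        return 0;
}
#endif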
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)

        if (--p->rt.time_slice)

        p->rt.time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->rt.run_list.prev != p->rt.run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
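/*
 * Illustrative userspace sketch (not part of this file, kept under "#if 0"):
 * the SCHED_RR timeslice refilled from DEF_TIMESLICE above can be queried
 * with sched_rr_get_interval(); SCHED_FIFO tasks have no timeslice.
 */
#if 0
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        if (sched_rr_get_interval(0, &ts) == -1) {
                perror("sched_rr_get_interval");
                return 1;
        }
        printf("RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
#endif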
static void set_curr_task_rt(struct rq *rq)
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
static const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .select_task_rq         = select_task_rq_rt,
#endif /* CONFIG_SMP */

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .rq_online              = rq_online_rt,
        .rq_offline             = rq_offline_rt,
        .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_wake_up           = task_wake_up_rt,
        .switched_from          = switched_from_rt,

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,

        .prio_changed           = prio_changed_rt,
        .switched_to            = switched_to_rt,
#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

static void print_rt_stats(struct seq_file *m, int cpu)
        struct rt_rq *rt_rq;

        for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
                print_rt_rq(m, cpu, rt_rq);