/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

9 * The "RT overload" flag: it gets set if a CPU has more than
10 * one runnable RT task.
static cpumask_t rt_overload_mask;
static atomic_t rto_count;

static inline int rt_overloaded(void)
{
	return atomic_read(&rto_count);
}

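/*
 * Note: would-be pullers do a cheap atomic_read() of rto_count first
 * and only scan rt_overload_mask once the count says that some
 * runqueue is overloaded (see pull_rt_task() below).
 */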
static inline void rt_set_overload(struct rq *rq)
{
	rq->rt.overloaded = 1;
	cpu_set(rq->cpu, rt_overload_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rto_count);
	cpu_clear(rq->cpu, rt_overload_mask);
	rq->rt.overloaded = 0;
}

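/*
 * A runqueue counts as overloaded when it has more than one runnable
 * RT task and at least one of them is allowed to migrate; only then
 * is there anything for another CPU to take.
 */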
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
		rt_set_overload(rq);
	else
		rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);
}

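/*
 * inc/dec_rt_tasks() keep the per-rq RT bookkeeping in sync on every
 * enqueue/dequeue: the runnable-RT count, the highest queued RT
 * priority and the migratory-task count, re-evaluating the overload
 * state afterwards.
 */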
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rt_task(p));
	rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
	if (p->prio < rq->rt.highest_prio)
		rq->rt.highest_prio = p->prio;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory++;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rt_task(p));
	WARN_ON(!rq->rt.rt_nr_running);
	rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
	if (rq->rt.rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(p->prio < rq->rt.highest_prio);
		if (p->prio == rq->rt.highest_prio) {
			/* recalculate */
			array = &rq->rt.active;
			rq->rt.highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->rt.highest_prio alone */
	} else
		rq->rt.highest_prio = MAX_RT_PRIO;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory--;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

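/*
 * The rt_prio_array is the classic O(1) structure: one list per
 * priority level plus a bitmap of non-empty levels. Enqueueing is a
 * list add plus a set_bit; the highest queued priority falls out of
 * sched_find_first_bit().
 */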
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	inc_cpu_load(rq, p->se.load.weight);

	inc_rt_tasks(p, rq);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
	dec_cpu_load(rq, p->se.load.weight);

	dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

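/*
 * sched_yield() for RT tasks: rotate the current task to the tail of
 * its priority list, so tasks queued at the same priority (if any)
 * run first.
 */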
static void
yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if
	 * the waking RT task is of higher priority than the
	 * current RT task. RT tasks behave differently than
	 * other tasks: if one gets preempted, we try to push
	 * it off to another queue. So trying to keep a
	 * preempting RT task on the same cache hot CPU will
	 * force the running RT task to a cold CPU. We would
	 * waste all the cache of the lower-prio RT task in
	 * hopes of saving some cache for an RT task that is
	 * just waking up and probably has a cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */

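/*
 * Note that a numerically lower ->prio means a higher RT priority,
 * so "p->prio < rq->curr->prio" reads "p has strictly higher
 * priority than current".
 */
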
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

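/*
 * O(1) pick: the first set bit in the priority bitmap is the highest
 * queued priority; run the head of that list.
 */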
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;

	return next;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
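/*
 * SMP balancing of RT tasks works in two directions:
 *
 *  - push: an overloaded runqueue tries to move its queued,
 *    non-running RT tasks to a CPU running something of lesser
 *    priority (push_rt_task()/push_rt_tasks()).
 *
 *  - pull: a runqueue that is about to schedule a lower-priority
 *    task scans the overloaded runqueues and grabs any queued RT
 *    task that would preempt its own next pick (pull_rt_task()).
 */
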
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

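/*
 * A task is a push/pull candidate only if it is not currently
 * running, is allowed on the target cpu (cpu == -1 means "any"),
 * and can run on more than one CPU in the first place.
 */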
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;

	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	assert_spin_locked(&rq->lock);

	if (likely(rq->rt.rt_nr_running < 2))
		return NULL;

	idx = sched_find_first_bit(array->bitmap);
	if (unlikely(idx >= MAX_RT_PRIO)) {
		WARN_ON(1); /* rt_nr_running is bad */
		return NULL;
	}

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));
	next = list_entry(queue->next, struct task_struct, run_list);
	if (unlikely(pick_rt_task(rq, next, cpu)))
		goto out;

	if (queue->next->next != queue) {
		/* same prio task */
		next = list_entry(queue->next->next, struct task_struct,
				  run_list);
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

 retry:
	/* slower, but more flexible */
	idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
	if (unlikely(idx >= MAX_RT_PRIO))
		return NULL;

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));
	list_for_each_entry(next, queue, run_list) {
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}
	goto retry;

 out:
	return next;
}

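/*
 * Scratch mask for find_lowest_cpus(). Presumably per-cpu (rather
 * than on-stack) because a cpumask_t can be large on big NR_CPUS
 * builds; callers run with a runqueue lock held, so the scratch
 * area cannot be preempted away mid-use.
 */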
static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
	int lowest_prio = -1;
	int lowest_cpu = -1;
	int count = 0;
	int cpu;

	cpus_and(*lowest_mask, cpu_online_map, task->cpus_allowed);

	/*
	 * Scan each rq for the lowest prio.
	 */
	for_each_cpu_mask(cpu, *lowest_mask) {
		struct rq *rq = cpu_rq(cpu);

		/* We look for lowest RT prio or non-rt CPU */
		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
			/*
			 * if we already found a low RT queue
			 * and now we found this non-rt queue,
			 * clear the mask and set our bit.
			 * Otherwise just return the queue as is
			 * and the count==1 will cause the algorithm
			 * to use the first bit found.
			 */
			if (lowest_cpu != -1) {
				cpus_clear(*lowest_mask);
				cpu_set(rq->cpu, *lowest_mask);
			}
			return 1;
		}

		/* no locking for now */
		if ((rq->rt.highest_prio > task->prio)
		    && (rq->rt.highest_prio >= lowest_prio)) {
			if (rq->rt.highest_prio > lowest_prio) {
				/* new low - clear old data */
				lowest_prio = rq->rt.highest_prio;
				lowest_cpu = cpu;
				count = 0;
			}
			count++;
		} else
			cpu_clear(cpu, *lowest_mask);
	}

	/*
	 * Clear out all the set bits that represent
	 * runqueues that were of higher prio than
	 * the lowest_prio.
	 */
	if (lowest_cpu > 0) {
		/*
		 * Perhaps we could add another cpumask op to
		 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
		 * Then that could be optimized to use memset and such.
		 */
		for_each_cpu_mask(cpu, *lowest_mask) {
			if (cpu >= lowest_cpu)
				break;
			cpu_clear(cpu, *lowest_mask);
		}
	}

	return count;
}

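/*
 * Worked example for find_lowest_cpus() above (hypothetical numbers):
 * task->prio = 10, four online CPUs with rt.highest_prio of 5, 15,
 * 15 and 20. CPU0 is cleared (prio 5 outranks the task). CPU1 sets a
 * new low (lowest_prio = 15, count = 1), CPU2 ties it (count = 2),
 * then CPU3 beats it (lowest_prio = 20, lowest_cpu = 3, count = 1).
 * The final loop clears CPU1 and CPU2, leaving only CPU3 set, and we
 * return a count of 1.
 */
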
static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu      = task_cpu(task);
	int count    = find_lowest_cpus(task, lowest_mask);

	if (!count)
		return -1; /* No targets found */

	/*
	 * There is no sense in performing an optimal search if only one
	 * target is found.
	 */
	if (count == 1)
		return first_cpu(*lowest_mask);

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int       best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the
	 * compatible locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {
				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the
 * non-running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	assert_spin_locked(&rq->lock);

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in at a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	assert_spin_locked(&lowest_rq->lock);

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks). There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not. We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT task */
	while (push_rt_task(rq))
		;
}

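/*
 * The pull side of the balancer: invoked (via schedule_balance_rt())
 * when this runqueue is about to drop to a lower priority, it scans
 * the CPUs flagged in rt_overload_mask and steals any queued RT task
 * that beats our next pick. The atomic rto_count read keeps the
 * common, non-overloaded case cheap.
 */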
static int pull_rt_task(struct rq *this_rq)
{
	struct task_struct *next;
	struct task_struct *p;
	struct rq *src_rq;
	int this_cpu = this_rq->cpu;
	int cpu;
	int ret = 0;

	assert_spin_locked(&this_rq->lock);

	/*
	 * If cpusets are used, and we have overlapping
	 * run queue cpusets, then this algorithm may not catch all.
	 * This is just the price you pay on trying to keep
	 * dirtying caches down on large SMP machines.
	 */
	if (likely(!rt_overloaded()))
		return 0;

	next = pick_next_task_rt(this_rq);

	for_each_cpu_mask(cpu, rt_overload_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
			/*
			 * It is possible that overlapping cpusets
			 * will miss clearing a non overloaded runqueue.
			 * Clear it now.
			 */
			if (double_lock_balance(this_rq, src_rq)) {
				/* unlocked our runqueue lock */
				struct task_struct *old_next = next;

				next = pick_next_task_rt(this_rq);
				if (next != old_next)
					ret = 1;
			}
			if (likely(src_rq->rt.rt_nr_running <= 1))
				/*
				 * Small chance that this_rq->curr changed
				 * but it's really harmless here.
				 */
				rt_clear_overload(this_rq);
			else
				/*
				 * Heh, the src_rq is now overloaded, since
				 * we already have the src_rq lock, go straight
				 * to pulling tasks from it.
				 */
				goto try_pulling;
			spin_unlock(&src_rq->lock);
			continue;
		}

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;

			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1) {
			spin_unlock(&src_rq->lock);
			continue;
		}

 try_pulling:
		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq's next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto out;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 *
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
 out:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}

static void schedule_balance_rt(struct rq *rq,
				struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) &&
	    rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void schedule_tail_balance_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUs.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

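/*
 * A freshly woken RT task that did not preempt here (its prio is not
 * above the highest queued on this rq) might still run right away on
 * another CPU, so give the pusher a chance.
 */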
static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
	if (unlikely(rt_task(p)) &&
	    !task_running(rq, p) &&
	    (p->prio >= rq->rt.highest_prio) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed    = *new_mask;
	p->nr_cpus_allowed = weight;
}

#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)	do { } while (0)
# define schedule_balance_rt(rq, prev)	do { } while (0)
# define wakeup_balance_rt(rq, p)	do { } while (0)
#endif /* CONFIG_SMP */

static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

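/*
 * Tasks enter this class via sched_setscheduler() with SCHED_FIFO or
 * SCHED_RR; the core scheduler dispatches through these methods and
 * falls through to fair_sched_class via ->next when no RT task is
 * runnable.
 */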
const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
#endif /* CONFIG_SMP */

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,
};