/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
}
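/*
 * Bookkeeping when an RT task is queued: count it in rt_nr_running
 * and, on SMP, keep rq->rt.highest_prio up to date for the push logic.
 */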
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
        if (p->prio < rq->rt.highest_prio)
                rq->rt.highest_prio = p->prio;
#endif /* CONFIG_SMP */
}
static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        WARN_ON(!rq->rt.rt_nr_running);
        rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
        if (rq->rt.rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(p->prio < rq->rt.highest_prio);
                if (p->prio == rq->rt.highest_prio) {
                        /* recalculate */
                        array = &rq->rt.active;
                        rq->rt.highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rq->rt.highest_prio alone */
        } else
                rq->rt.highest_prio = MAX_RT_PRIO;
#endif /* CONFIG_SMP */
}
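/*
 * Queue p at the tail of the list for its priority and mark that
 * priority as populated in the bitmap.
 */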
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
        inc_cpu_load(rq, p->se.load.weight);

        inc_rt_tasks(p, rq);
}
/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
        dec_cpu_load(rq, p->se.load.weight);

        dec_rt_tasks(p, rq);
}
/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}
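/*
 * An RT yield simply moves the current task to the tail of its own
 * priority queue; it keeps running if nothing else is queued there.
 */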
static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}
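/*
 * Pick the highest-priority runnable RT task: sched_find_first_bit()
 * returns the lowest set bit in the priority bitmap (numerically lower
 * prio means higher priority) and the head of that queue runs next.
 */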
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
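/*
 * SMP balancing of RT tasks: a runqueue holding more than one queued
 * RT task can push the spare ones to CPUs running lower-priority work.
 */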
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        assert_spin_locked(&rq->lock);

        if (likely(rq->rt.rt_nr_running < 2))
                return NULL;

        idx = sched_find_first_bit(array->bitmap);
        if (unlikely(idx >= MAX_RT_PRIO)) {
                WARN_ON(1); /* rt_nr_running is bad */
                return NULL;
        }

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);
        if (unlikely(next != rq->curr))
                return next;

        if (queue->next->next != queue) {
                /* another task is queued at the same priority */
                next = list_entry(queue->next->next, struct task_struct, run_list);
                return next;
        }

        /* slower, but more flexible */
        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
        if (unlikely(idx >= MAX_RT_PRIO)) {
                WARN_ON(1); /* rt_nr_running was 2 and above! */
                return NULL;
        }

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        return next;
}
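/*
 * Per-CPU scratch cpumask used by find_lock_lowest_rq() below so the
 * mask does not have to live on the stack.
 */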
static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
                                      struct rq *this_rq)
{
        struct rq *lowest_rq = NULL;
        int cpu;
        int tries;
        cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);

        cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                /*
                 * Scan each rq for the lowest prio.
                 */
                for_each_cpu_mask(cpu, *cpu_mask) {
                        struct rq *rq = &per_cpu(runqueues, cpu);

                        if (cpu == this_rq->cpu)
                                continue;

                        /* We look for lowest RT prio or non-rt CPU */
                        if (rq->rt.highest_prio >= MAX_RT_PRIO) {
                                lowest_rq = rq;
                                break;
                        }

                        /* no locking for now */
                        if (rq->rt.highest_prio > task->prio &&
                            (!lowest_rq ||
                             rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
                                lowest_rq = rq;
                        }
                }

                if (!lowest_rq)
                        break;

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(this_rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the mean time, task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != this_rq ||
                                     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
                                     task_running(this_rq, task) ||
                                     !task->se.on_rq)) {
                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio > task->prio)
                        break;

                /* try again */
                spin_unlock(&lowest_rq->lock);
                lowest_rq = NULL;
        }

        return lowest_rq;
}
/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *this_rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;
        int paranoid = RT_MAX_TRIES;

        assert_spin_locked(&this_rq->lock);

        next_task = pick_next_highest_task_rt(this_rq);
        if (!next_task)
                return 0;

 retry:
        if (unlikely(next_task == this_rq->curr))
                return 0;

        /*
         * It's possible that next_task slipped in with a higher
         * priority than current. If that's the case just reschedule
         * current.
         */
        if (unlikely(next_task->prio < this_rq->curr->prio)) {
                resched_task(this_rq->curr);
                return 0;
        }

        /* We might release this_rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, this_rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases this_rq->lock
                 * so it is possible that next_task has changed.
                 * If it has, then try again.
                 */
                task = pick_next_highest_task_rt(this_rq);
                if (unlikely(task != next_task) && task && paranoid--) {
                        put_task_struct(next_task);
                        next_task = task;
                        goto retry;
                }
                goto out;
        }

        assert_spin_locked(&lowest_rq->lock);

        deactivate_task(this_rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);

        resched_task(lowest_rq->curr);

        spin_unlock(&lowest_rq->lock);

        ret = 1;
out:
        put_task_struct(next_task);

        return ret;
}
/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there are
 *       no more RT tasks). There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not. We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT task */
        while (push_rt_task(rq))
                ;
}
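/*
 * Run after a context switch: if this runqueue ended up with more than
 * one queued RT task, try to push the extras to other CPUs.
 */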
static void schedule_tail_balance_rt(struct rq *rq)
{
        /*
         * If we have more than one rt_task queued, then
         * see if we can push the other rt_tasks off to other CPUs.
         * Note we may release the rq lock, and since
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
        if (unlikely(rq->rt.rt_nr_running > 1)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
        }
}
/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        head = array->queue + idx;
        curr = head->prev;

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_idx = idx;
        rq->rt.rt_load_balance_head = head;
        rq->rt.rt_load_balance_curr = curr;

        return p;
}
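/*
 * Return the next task in the iteration and advance the saved cursor,
 * moving on to the next populated priority queue once the current one
 * wraps back around to its head.
 */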
static struct task_struct *load_balance_next_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = rq->rt.rt_load_balance_idx;
        head = rq->rt.rt_load_balance_head;
        curr = rq->rt.rt_load_balance_curr;

        /*
         * If we arrived back to the head again then
         * iterate to the next queue (if any):
         */
        if (unlikely(head == curr)) {
                int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

                if (next_idx >= MAX_RT_PRIO)
                        return NULL;

                idx = next_idx;
                head = array->queue + idx;
                curr = head->prev;

                rq->rt.rt_load_balance_idx = idx;
                rq->rt.rt_load_balance_head = head;
        }

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_curr = curr;

        return p;
}
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        struct rq_iterator rt_rq_iterator;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        /*
         * Pass the 'busiest' rq argument into the
         * load_balance_[start|next]_rt iterators.
         */
        rt_rq_iterator.arg = busiest;

        return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
                             idle, all_pinned, this_best_prio, &rt_rq_iterator);
}
static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        struct rq_iterator rt_rq_iterator;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        rt_rq_iterator.arg = busiest;

        return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
                                  &rt_rq_iterator);
}
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)   do { } while (0)
#endif /* CONFIG_SMP */
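/*
 * Scheduler-tick handling for RT tasks: update runtime statistics and
 * rotate the round-robin timeslice for SCHED_RR.
 */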
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->run_list.prev != p->run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}
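/*
 * Invoked when the running task switches to (or within) the RT class,
 * e.g. via sched_setscheduler(); reset exec_start so runtime
 * accounting begins from now.
 */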
static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}
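/*
 * Method table for the RT scheduling class. The ->next pointer chains
 * to the fair class, which is consulted only when no RT task is
 * runnable.
 */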
const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
#endif /* CONFIG_SMP */

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
};