sched: mix tasks and groups
kernel/sched_rt.c
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        cpu_set(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpu_clear(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
                if (!rq->rt.overloaded) {
                        rt_set_overload(rq);
                        rq->rt.overloaded = 1;
                }
        } else if (rq->rt.overloaded) {
                rt_clear_overload(rq);
                rq->rt.overloaded = 0;
        }
}
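
/*
 * Note: a runqueue only counts as "overloaded" when it has more than
 * one runnable RT task and at least one of them may migrate; the
 * rto_mask/rto_count in the root domain is what the push/pull logic
 * further down consults to find such runqueues.
 */
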
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)
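
/*
 * With RT group scheduling, the entity walk starts at the task's own
 * sched_rt_entity and follows ->parent through each enclosing task
 * group up to the root; the !CONFIG_RT_GROUP_SCHED variant further
 * down collapses the same walk to a single step.
 */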

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
                struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

                enqueue_rt_entity(rt_se);
                if (rt_rq->highest_prio < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}
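
/*
 * A throttled group that still contains priority-boosted (PI) tasks
 * must not be treated as throttled; otherwise throttling could keep
 * a lock holder off the CPU and invert priorities. Hence the
 * rt_nr_boosted check above.
 */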

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline cpumask_t sched_rt_period_mask(void)
{
        return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline cpumask_t sched_rt_period_mask(void)
{
        return cpu_online_map;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline cpumask_t sched_rt_period_mask(void)
{
        return cpu_online_map;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1;
        cpumask_t span;

        if (rt_b->rt_runtime == RUNTIME_INF)
                return 1;

        span = sched_rt_period_mask();
        for_each_cpu_mask(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        u64 runtime;

                        spin_lock(&rt_rq->rt_runtime_lock);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        spin_unlock(&rt_rq->rt_runtime_lock);
                }

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                spin_unlock(&rq->lock);
        }

        return idle;
}
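
/*
 * The bandwidth period timer calls the above for every rq in the
 * period mask: it pays back up to overrun * rt_runtime worth of
 * accumulated rt_time and unthrottles a queue once it drops below
 * its runtime again. The idle result tells the caller whether the
 * timer still needs to keep running.
 */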

#ifdef CONFIG_SMP
static int balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
        int i, weight, more = 0;
        u64 rt_period;

        weight = cpus_weight(rd->span);

        spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu_mask(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                spin_lock(&iter->rt_runtime_lock);
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        do_div(diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
                                spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
                spin_unlock(&iter->rt_runtime_lock);
        }
        spin_unlock(&rt_b->rt_runtime_lock);

        return more;
}
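
/*
 * Runtime borrowing: a CPU that has exhausted its RT budget may take
 * an equal share (diff / weight) of the unused runtime of every other
 * CPU in its root domain, capped so rt_runtime never exceeds the full
 * period. E.g. in a 4-CPU domain an idle sibling donates a quarter of
 * its spare budget per call.
 */
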
#endif

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio;
#endif

        return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
                return 0;

#ifdef CONFIG_SMP
        if (rt_rq->rt_time > runtime) {
                int more;

                spin_unlock(&rt_rq->rt_runtime_lock);
                more = balance_runtime(rt_rq);
                spin_lock(&rt_rq->rt_runtime_lock);

                if (more)
                        runtime = sched_rt_runtime(rt_rq);
        }
#endif

        if (rt_rq->rt_time > runtime) {
                rt_rq->rt_throttled = 1;
                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}
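
/*
 * Note the lock dance in sched_rt_runtime_exceeded(): the caller holds
 * rt_rq->rt_runtime_lock, which is dropped around balance_runtime(),
 * presumably because borrowing takes the global bandwidth lock plus
 * the siblings' runtime locks and those must not nest inside ours.
 */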

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);

                spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_time += delta_exec;
                if (sched_rt_runtime_exceeded(rt_rq))
                        resched_task(curr);
                spin_unlock(&rt_rq->rt_runtime_lock);
        }
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_se_prio(rt_se) < rt_rq->highest_prio)
                rt_rq->highest_prio = rt_se_prio(rt_se);
#endif
#ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
                rq->rt.rt_nr_migratory++;
        }

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
#else
        start_rt_bandwidth(&def_rt_bandwidth);
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_rq->rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
                if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
                        /* recalculate */
                        array = &rt_rq->active;
                        rt_rq->highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rt_rq->highest_prio alone */
        } else
                rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
                rq->rt.rt_nr_migratory--;
        }

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);

        if (group_rq && rt_rq_throttled(group_rq))
                return;

        list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 *
 * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
 */
static void dequeue_rt_stack(struct task_struct *p)
{
        struct sched_rt_entity *rt_se, *top_se;

        /*
         * dequeue all, top-down.
         */
        do {
                rt_se = &p->rt;
                top_se = NULL;
                for_each_sched_rt_entity(rt_se) {
                        if (on_rt_rq(rt_se))
                                top_se = rt_se;
                }
                if (top_se)
                        dequeue_rt_entity(top_se);
        } while (top_se);
}
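
/*
 * Each pass of the loop above walks up from the task to find the
 * topmost entity that is still queued, dequeues just that one, and
 * restarts from the bottom; with a hierarchy of depth h that is the
 * O(1/2 h^2) behaviour flagged in the comment.
 */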

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (wakeup)
                rt_se->timeout = 0;

        dequeue_rt_stack(p);

        /*
         * enqueue everybody, bottom-up.
         */
        for_each_sched_rt_entity(rt_se)
                enqueue_rt_entity(rt_se);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        update_curr_rt(rq);

        dequeue_rt_stack(p);

        /*
         * re-enqueue all non-empty rt_rq entities.
         */
        for_each_sched_rt_entity(rt_se) {
                rt_rq = group_rt_rq(rt_se);
                if (rt_rq && rt_rq->rt_nr_running)
                        enqueue_rt_entity(rt_se);
        }
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
        struct rt_prio_array *array = &rt_rq->active;

        list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
        struct rq *rq = task_rq(p);

        /*
         * If the current task is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues, even if the woken
         * RT task is of higher priority than the current RT task.
         * RT tasks behave differently than other tasks. If
         * one gets preempted, we try to push it off to another queue.
         * So trying to keep a preempting RT task on the same
         * cache hot CPU will force the running RT task to
         * a cold CPU. So we waste all the cache for the lower
         * RT task in hopes of saving some cache for the RT task
         * that is just being woken and probably will have
         * cold cache anyway.
         */
        if (unlikely(rt_task(rq->curr)) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int cpu = find_lowest_rq(p);

                return (cpu == -1) ? task_cpu(p) : cpu;
        }

        /*
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away.
         */
        return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (unlikely(!rt_rq->rt_nr_running))
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock;
        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
            (p->rt.nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct task_struct *next = NULL;
        struct sched_rt_entity *rt_se;
        struct rt_prio_array *array;
        struct rt_rq *rt_rq;
        int idx;

        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
 next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio < idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
                        }
                }
                if (!next) {
                        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
                        goto next_idx;
                }
        }

        return next;
}
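
/*
 * In other words: the highest-priority queued task that is not
 * currently running, may run on @cpu (when cpu >= 0) and is allowed
 * on more than one CPU, i.e. the best candidate to push or pull.
 */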

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
        int       lowest_prio = -1;
        int       lowest_cpu  = -1;
        int       count       = 0;
        int       cpu;

        cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);

        /*
         * Scan each rq for the lowest prio.
         */
        for_each_cpu_mask(cpu, *lowest_mask) {
                struct rq *rq = cpu_rq(cpu);

                /* We look for lowest RT prio or non-rt CPU */
                if (rq->rt.highest_prio >= MAX_RT_PRIO) {
                        /*
                         * If we already found a low RT queue
                         * and now we find this non-rt queue,
                         * clear the mask and set our bit.
                         * Otherwise just return the queue as is
                         * and the count==1 will cause the algorithm
                         * to use the first bit found.
                         */
                        if (lowest_cpu != -1) {
                                cpus_clear(*lowest_mask);
                                cpu_set(rq->cpu, *lowest_mask);
                        }
                        return 1;
                }

                /* no locking for now */
                if ((rq->rt.highest_prio > task->prio)
                    && (rq->rt.highest_prio >= lowest_prio)) {
                        if (rq->rt.highest_prio > lowest_prio) {
                                /* new low - clear old data */
                                lowest_prio = rq->rt.highest_prio;
                                lowest_cpu = cpu;
                                count = 0;
                        }
                        count++;
                } else
                        cpu_clear(cpu, *lowest_mask);
        }

        /*
         * Clear out all the set bits that represent
         * runqueues that were of higher prio than
         * the lowest_prio.
         */
        if (lowest_cpu > 0) {
                /*
                 * Perhaps we could add another cpumask op to
                 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
                 * Then that could be optimized to use memset and such.
                 */
                for_each_cpu_mask(cpu, *lowest_mask) {
                        if (cpu >= lowest_cpu)
                                break;
                        cpu_clear(cpu, *lowest_mask);
                }
        }

        return count;
}
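
/*
 * On return *lowest_mask has been trimmed to the candidate CPUs and
 * the return value counts them; 1 tells the caller to simply take
 * the first bit, 0 means no runqueue beats this task's priority.
 */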

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
        int first;

        /* "this_cpu" is cheaper to preempt than a remote processor */
        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
                return this_cpu;

        first = first_cpu(*mask);
        if (first != NR_CPUS)
                return first;

        return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
        int count    = find_lowest_cpus(task, lowest_mask);

        if (!count)
                return -1; /* No targets found */

        /*
         * There is no sense in performing an optimal search if only one
         * target is found.
         */
        if (count == 1)
                return first_cpu(*lowest_mask);

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpu_isset(cpu, *lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        cpumask_t domain_mask;
                        int       best_cpu;

                        cpus_and(domain_mask, sd->span, *lowest_mask);

                        best_cpu = pick_optimal_cpu(this_cpu,
                                                    &domain_mask);
                        if (best_cpu != -1)
                                return best_cpu;
                }
        }

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the meantime, the task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpu_isset(lowest_rq->cpu,
                                                task->cpus_allowed) ||
                                     task_running(rq, task) ||
                                     !task->se.on_rq)) {

                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio > task->prio)
                        break;

                /* try again */
                spin_unlock(&lowest_rq->lock);
                lowest_rq = NULL;
        }

        return lowest_rq;
}
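
/*
 * Caveat: double_lock_balance() may drop rq->lock to take both locks
 * in the proper order, so everything known about 'task' is revalidated
 * once both locks are held. A NULL return means no suitable runqueue
 * could be locked within RT_MAX_TRIES attempts.
 */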

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;
        int paranoid = RT_MAX_TRIES;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;

 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that the next_task slipped in at a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock
                 * so it is possible that next_task has changed.
                 * If it has, then try again.
                 */
                task = pick_next_highest_task_rt(rq, -1);
                if (unlikely(task != next_task) && task && paranoid--) {
                        put_task_struct(next_task);
                        next_task = task;
                        goto retry;
                }
                goto out;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);

        resched_task(lowest_rq->curr);

        spin_unlock(&lowest_rq->lock);

        ret = 1;
out:
        put_task_struct(next_task);

        return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT task */
        while (push_rt_task(rq))
                ;
}

static int pull_rt_task(struct rq *this_rq)
{
        int this_cpu = this_rq->cpu, ret = 0, cpu;
        struct task_struct *p, *next;
        struct rq *src_rq;

        if (likely(!rt_overloaded(this_rq)))
                return 0;

        next = pick_next_task_rt(this_rq);

        for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);
                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * steal our next task - hence we must cause
                 * the caller to recalculate the next task
                 * in that case:
                 */
                if (double_lock_balance(this_rq, src_rq)) {
                        struct task_struct *old_next = next;

                        next = pick_next_task_rt(this_rq);
                        if (next != old_next)
                                ret = 1;
                }

                /*
                 * Are there still pullable RT tasks?
                 */
                if (src_rq->rt.rt_nr_running <= 1)
                        goto skip;

                p = pick_next_highest_task_rt(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (!next || (p->prio < next->prio))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);

                        /*
                         * There's a chance that p is higher in priority
                         * than what's currently running on its cpu.
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if neither p nor our next task is of
                         * higher priority than the task currently
                         * running on p's runqueue.
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))
                                goto skip;

                        ret = 1;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         *
                         * Update next so that we won't pick a task
                         * on another cpu with a priority lower (or equal)
                         * than the one we just picked.
                         */
                        next = p;

                }
 skip:
                spin_unlock(&src_rq->lock);
        }

        return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
        /*
         * If we have more than one rt_task queued, then
         * see if we can push the other rt_tasks off to other CPUs.
         * Note we may release the rq lock, and since
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
        if (unlikely(rq->rt.overloaded)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
        }
}

static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
        if (!task_running(rq, p) &&
            (p->prio >= rq->rt.highest_prio) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        /* don't touch RT tasks */
        return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        /* don't touch RT tasks */
        return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p,
                                const cpumask_t *new_mask)
{
        int weight = cpus_weight(*new_mask);

        BUG_ON(!rt_task(p));

        /*
         * Update the migration status of the RQ if we have an RT task
         * which is running AND changing its weight value.
         */
        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);

                if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
                        rq->rt.rt_nr_migratory++;
                } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;
                }

                update_rt_migration(rq);
        }

        p->cpus_allowed    = *new_mask;
        p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void join_domain_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_set_overload(rq);
}

/* Assumes rq->lock is held */
static void leave_domain_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_clear_overload(rq);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
                           int running)
{
        /*
         * If there are other RT tasks then we will reschedule
         * and the scheduling of the other RT tasks will handle
         * the balancing. But if we are the last RT task
         * we may need to handle the pulling of RT tasks
         * now.
         */
        if (!rq->rt.rt_nr_running)
                pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
                           int running)
{
        int check_resched = 1;

        /*
         * If we are already running, then there's nothing
         * that needs to be done. But if we are not running
         * we may need to preempt the current running task.
         * If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
        if (!running) {
#ifdef CONFIG_SMP
                if (rq->rt.overloaded && push_rt_task(rq) &&
                    /* Don't resched if we changed runqueues */
                    rq != task_rq(p))
                        check_resched = 0;
#endif /* CONFIG_SMP */
                if (check_resched && p->prio < rq->curr->prio)
                        resched_task(rq->curr);
        }
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                            int oldprio, int running)
{
        if (running) {
#ifdef CONFIG_SMP
                /*
                 * If our priority decreases while running, we
                 * may need to pull tasks to this runqueue.
                 */
                if (oldprio < p->prio)
                        pull_rt_task(rq);
                /*
                 * If there's a higher priority task waiting to run
                 * then reschedule. Note, the above pull_rt_task
                 * can release the rq lock and p could migrate.
                 * Only reschedule if p is still on the same runqueue.
                 */
                if (p->prio > rq->rt.highest_prio && rq->curr == p)
                        resched_task(p);
#else
                /* For UP simply resched on drop of prio */
                if (oldprio < p->prio)
                        resched_task(p);
#endif /* CONFIG_SMP */
        } else {
                /*
                 * This task is not running, but if it is of
                 * higher priority than the current running task
                 * then reschedule.
                 */
                if (p->prio < rq->curr->prio)
                        resched_task(rq->curr);
        }
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
        unsigned long soft, hard;

        if (!p->signal)
                return;

        soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
        hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

        if (soft != RLIM_INFINITY) {
                unsigned long next;

                p->rt.timeout++;
                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                if (p->rt.timeout > next)
                        p->it_sched_expires = p->se.sum_exec_runtime;
        }
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
        update_curr_rt(rq);

        watchdog(rq, p);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->rt.time_slice)
                return;

        p->rt.time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->rt.run_list.prev != p->rt.run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}
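
/*
 * SCHED_RR round-robin: every task gets DEF_TIMESLICE ticks and is
 * then moved to the tail of its priority queue; SCHED_FIFO tasks
 * return early above and run until they block, yield or are
 * preempted.
 */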

static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,
#endif /* CONFIG_SMP */

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .join_domain            = join_domain_rt,
        .leave_domain           = leave_domain_rt,
        .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_wake_up           = task_wake_up_rt,
        .switched_from          = switched_from_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,

        .prio_changed           = prio_changed_rt,
        .switched_to            = switched_to_rt,
};