sched: convert struct root_domain to cpumask_var_t.
kernel/sched_rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #ifdef CONFIG_SMP
7
8 static inline int rt_overloaded(struct rq *rq)
9 {
10         return atomic_read(&rq->rd->rto_count);
11 }
12
13 static inline void rt_set_overload(struct rq *rq)
14 {
15         if (!rq->online)
16                 return;
17
18         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
19         /*
20          * Make sure the mask is visible before we set
21          * the overload count. That is checked to determine
22          * if we should look at the mask. It would be a shame
23          * if we looked at the mask, but the mask was not
24          * updated yet.
25          */
26         wmb();
27         atomic_inc(&rq->rd->rto_count);
28 }
29
30 static inline void rt_clear_overload(struct rq *rq)
31 {
32         if (!rq->online)
33                 return;
34
35         /* the order here really doesn't matter */
36         atomic_dec(&rq->rd->rto_count);
37         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
38 }
39
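/*
 * Track whether this runqueue has pushable RT work: it counts as
 * overloaded once it runs more than one RT task and at least one of
 * them is allowed to migrate elsewhere.
 */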
40 static void update_rt_migration(struct rq *rq)
41 {
42         if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
43                 if (!rq->rt.overloaded) {
44                         rt_set_overload(rq);
45                         rq->rt.overloaded = 1;
46                 }
47         } else if (rq->rt.overloaded) {
48                 rt_clear_overload(rq);
49                 rq->rt.overloaded = 0;
50         }
51 }
52 #endif /* CONFIG_SMP */
53
54 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
55 {
56         return container_of(rt_se, struct task_struct, rt);
57 }
58
59 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60 {
61         return !list_empty(&rt_se->run_list);
62 }
63
64 #ifdef CONFIG_RT_GROUP_SCHED
65
66 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
67 {
68         if (!rt_rq->tg)
69                 return RUNTIME_INF;
70
71         return rt_rq->rt_runtime;
72 }
73
74 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
75 {
76         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
77 }
78
79 #define for_each_leaf_rt_rq(rt_rq, rq) \
80         list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81
82 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83 {
84         return rt_rq->rq;
85 }
86
87 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88 {
89         return rt_se->rt_rq;
90 }
91
92 #define for_each_sched_rt_entity(rt_se) \
93         for (; rt_se; rt_se = rt_se->parent)
94
95 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
96 {
97         return rt_se->my_q;
98 }
99
100 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
101 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
102
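/*
 * (Re)enqueue a group's rt_se in its parent once the group has runnable
 * tasks again, and preempt the current task if the group now holds a
 * higher-priority task.
 */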
103 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
104 {
105         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
106         struct sched_rt_entity *rt_se = rt_rq->rt_se;
107
108         if (rt_rq->rt_nr_running) {
109                 if (rt_se && !on_rt_rq(rt_se))
110                         enqueue_rt_entity(rt_se);
111                 if (rt_rq->highest_prio < curr->prio)
112                         resched_task(curr);
113         }
114 }
115
116 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
117 {
118         struct sched_rt_entity *rt_se = rt_rq->rt_se;
119
120         if (rt_se && on_rt_rq(rt_se))
121                 dequeue_rt_entity(rt_se);
122 }
123
124 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
125 {
126         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
127 }
128
129 static int rt_se_boosted(struct sched_rt_entity *rt_se)
130 {
131         struct rt_rq *rt_rq = group_rt_rq(rt_se);
132         struct task_struct *p;
133
134         if (rt_rq)
135                 return !!rt_rq->rt_nr_boosted;
136
137         p = rt_task_of(rt_se);
138         return p->prio != p->normal_prio;
139 }
140
141 #ifdef CONFIG_SMP
142 static inline const struct cpumask *sched_rt_period_mask(void)
143 {
144         return cpu_rq(smp_processor_id())->rd->span;
145 }
146 #else
147 static inline const struct cpumask *sched_rt_period_mask(void)
148 {
149         return cpu_online_mask;
150 }
151 #endif
152
153 static inline
154 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
155 {
156         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
157 }
158
159 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
160 {
161         return &rt_rq->tg->rt_bandwidth;
162 }
163
164 #else /* !CONFIG_RT_GROUP_SCHED */
165
166 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
167 {
168         return rt_rq->rt_runtime;
169 }
170
171 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
172 {
173         return ktime_to_ns(def_rt_bandwidth.rt_period);
174 }
175
176 #define for_each_leaf_rt_rq(rt_rq, rq) \
177         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180 {
181         return container_of(rt_rq, struct rq, rt);
182 }
183
184 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185 {
186         struct task_struct *p = rt_task_of(rt_se);
187         struct rq *rq = task_rq(p);
188
189         return &rq->rt;
190 }
191
192 #define for_each_sched_rt_entity(rt_se) \
193         for (; rt_se; rt_se = NULL)
194
195 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
196 {
197         return NULL;
198 }
199
200 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
201 {
202         if (rt_rq->rt_nr_running)
203                 resched_task(rq_of_rt_rq(rt_rq)->curr);
204 }
205
206 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
207 {
208 }
209
210 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
211 {
212         return rt_rq->rt_throttled;
213 }
214
215 static inline const struct cpumask *sched_rt_period_mask(void)
216 {
217         return cpu_online_mask;
218 }
219
220 static inline
221 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
222 {
223         return &cpu_rq(cpu)->rt;
224 }
225
226 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
227 {
228         return &def_rt_bandwidth;
229 }
230
231 #endif /* CONFIG_RT_GROUP_SCHED */
232
233 #ifdef CONFIG_SMP
234 /*
235  * We ran out of runtime, see if we can borrow some from our neighbours.
236  */
237 static int do_balance_runtime(struct rt_rq *rt_rq)
238 {
239         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
240         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
241         int i, weight, more = 0;
242         u64 rt_period;
243
244         weight = cpumask_weight(rd->span);
245
246         spin_lock(&rt_b->rt_runtime_lock);
247         rt_period = ktime_to_ns(rt_b->rt_period);
248         for_each_cpu(i, rd->span) {
249                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
250                 s64 diff;
251
252                 if (iter == rt_rq)
253                         continue;
254
255                 spin_lock(&iter->rt_runtime_lock);
256                 /*
257                  * Either all rqs have inf runtime and there's nothing to steal
258                  * or __disable_runtime() below sets a specific rq to inf to
259                  * indicate it's been disabled and disallow stealing.
260                  */
261                 if (iter->rt_runtime == RUNTIME_INF)
262                         goto next;
263
264                 /*
265                  * From runqueues with spare time, take 1/n part of their
266                  * spare time, but no more than our period.
267                  */
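                /*
                 * Illustrative numbers: with a 4-cpu span and 2ms of
                 * spare time on this neighbour we would move 0.5ms,
                 * clamped so rt_runtime never exceeds rt_period.
                 */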
268                 diff = iter->rt_runtime - iter->rt_time;
269                 if (diff > 0) {
270                         diff = div_u64((u64)diff, weight);
271                         if (rt_rq->rt_runtime + diff > rt_period)
272                                 diff = rt_period - rt_rq->rt_runtime;
273                         iter->rt_runtime -= diff;
274                         rt_rq->rt_runtime += diff;
275                         more = 1;
276                         if (rt_rq->rt_runtime == rt_period) {
277                                 spin_unlock(&iter->rt_runtime_lock);
278                                 break;
279                         }
280                 }
281 next:
282                 spin_unlock(&iter->rt_runtime_lock);
283         }
284         spin_unlock(&rt_b->rt_runtime_lock);
285
286         return more;
287 }
288
289 /*
290  * Ensure this RQ takes back all the runtime it lent to its neighbours.
291  */
292 static void __disable_runtime(struct rq *rq)
293 {
294         struct root_domain *rd = rq->rd;
295         struct rt_rq *rt_rq;
296
297         if (unlikely(!scheduler_running))
298                 return;
299
300         for_each_leaf_rt_rq(rt_rq, rq) {
301                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
302                 s64 want;
303                 int i;
304
305                 spin_lock(&rt_b->rt_runtime_lock);
306                 spin_lock(&rt_rq->rt_runtime_lock);
307                 /*
308                  * Either we're all inf and nobody needs to borrow, or we're
309                  * already disabled and thus have nothing to do, or we have
310                  * exactly the right amount of runtime to take out.
311                  */
312                 if (rt_rq->rt_runtime == RUNTIME_INF ||
313                                 rt_rq->rt_runtime == rt_b->rt_runtime)
314                         goto balanced;
315                 spin_unlock(&rt_rq->rt_runtime_lock);
316
317                 /*
318                  * Calculate the difference between what we started out with
319                  * and what we currently have; that's the amount of runtime
320                  * we lent out and now have to reclaim.
321                  */
322                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
323
324                 /*
325                  * Greedy reclaim, take back as much as we can.
326                  */
327                 for_each_cpu(i, rd->span) {
328                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
329                         s64 diff;
330
331                         /*
332                          * Can't reclaim from ourselves or disabled runqueues.
333                          */
334                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
335                                 continue;
336
337                         spin_lock(&iter->rt_runtime_lock);
338                         if (want > 0) {
339                                 diff = min_t(s64, iter->rt_runtime, want);
340                                 iter->rt_runtime -= diff;
341                                 want -= diff;
342                         } else {
343                                 iter->rt_runtime -= want;
344                                 want -= want;
345                         }
346                         spin_unlock(&iter->rt_runtime_lock);
347
348                         if (!want)
349                                 break;
350                 }
351
352                 spin_lock(&rt_rq->rt_runtime_lock);
353                 /*
354                  * We cannot be left wanting - that would mean some runtime
355                  * leaked out of the system.
356                  */
357                 BUG_ON(want);
358 balanced:
359                 /*
360                  * Disable all the borrow logic by pretending we have inf
361                  * runtime - in which case borrowing doesn't make sense.
362                  */
363                 rt_rq->rt_runtime = RUNTIME_INF;
364                 spin_unlock(&rt_rq->rt_runtime_lock);
365                 spin_unlock(&rt_b->rt_runtime_lock);
366         }
367 }
368
369 static void disable_runtime(struct rq *rq)
370 {
371         unsigned long flags;
372
373         spin_lock_irqsave(&rq->lock, flags);
374         __disable_runtime(rq);
375         spin_unlock_irqrestore(&rq->lock, flags);
376 }
377
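/*
 * Undo __disable_runtime(): give every rt_rq on this runqueue its
 * bandwidth defaults back and clear any accumulated time and throttle
 * state.
 */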
378 static void __enable_runtime(struct rq *rq)
379 {
380         struct rt_rq *rt_rq;
381
382         if (unlikely(!scheduler_running))
383                 return;
384
385         /*
386          * Reset each runqueue's bandwidth settings
387          */
388         for_each_leaf_rt_rq(rt_rq, rq) {
389                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
390
391                 spin_lock(&rt_b->rt_runtime_lock);
392                 spin_lock(&rt_rq->rt_runtime_lock);
393                 rt_rq->rt_runtime = rt_b->rt_runtime;
394                 rt_rq->rt_time = 0;
395                 rt_rq->rt_throttled = 0;
396                 spin_unlock(&rt_rq->rt_runtime_lock);
397                 spin_unlock(&rt_b->rt_runtime_lock);
398         }
399 }
400
401 static void enable_runtime(struct rq *rq)
402 {
403         unsigned long flags;
404
405         spin_lock_irqsave(&rq->lock, flags);
406         __enable_runtime(rq);
407         spin_unlock_irqrestore(&rq->lock, flags);
408 }
409
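/*
 * Called with rt_rq->rt_runtime_lock held; the lock is dropped around
 * do_balance_runtime() so the per-rq iterator locks can be taken there.
 */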
410 static int balance_runtime(struct rt_rq *rt_rq)
411 {
412         int more = 0;
413
414         if (rt_rq->rt_time > rt_rq->rt_runtime) {
415                 spin_unlock(&rt_rq->rt_runtime_lock);
416                 more = do_balance_runtime(rt_rq);
417                 spin_lock(&rt_rq->rt_runtime_lock);
418         }
419
420         return more;
421 }
422 #else /* !CONFIG_SMP */
423 static inline int balance_runtime(struct rt_rq *rt_rq)
424 {
425         return 0;
426 }
427 #endif /* CONFIG_SMP */
428
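/*
 * Periodic bandwidth refresh: for every rt_rq in the period mask, pay
 * back the time consumed in the last period(s), unthrottle queues that
 * have dropped below their runtime again, and report whether everything
 * has gone idle.
 */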
429 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
430 {
431         int i, idle = 1;
432         const struct cpumask *span;
433
434         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
435                 return 1;
436
437         span = sched_rt_period_mask();
438         for_each_cpu(i, span) {
439                 int enqueue = 0;
440                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
441                 struct rq *rq = rq_of_rt_rq(rt_rq);
442
443                 spin_lock(&rq->lock);
444                 if (rt_rq->rt_time) {
445                         u64 runtime;
446
447                         spin_lock(&rt_rq->rt_runtime_lock);
448                         if (rt_rq->rt_throttled)
449                                 balance_runtime(rt_rq);
450                         runtime = rt_rq->rt_runtime;
451                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
452                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
453                                 rt_rq->rt_throttled = 0;
454                                 enqueue = 1;
455                         }
456                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
457                                 idle = 0;
458                         spin_unlock(&rt_rq->rt_runtime_lock);
459                 } else if (rt_rq->rt_nr_running)
460                         idle = 0;
461
462                 if (enqueue)
463                         sched_rt_rq_enqueue(rt_rq);
464                 spin_unlock(&rq->lock);
465         }
466
467         return idle;
468 }
469
470 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
471 {
472 #ifdef CONFIG_RT_GROUP_SCHED
473         struct rt_rq *rt_rq = group_rt_rq(rt_se);
474
475         if (rt_rq)
476                 return rt_rq->highest_prio;
477 #endif
478
479         return rt_task_of(rt_se)->prio;
480 }
481
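/*
 * Returns 1 when this rt_rq has used up its runtime for the current
 * period and must be throttled (its entities are dequeued until the
 * period timer replenishes it).
 */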
482 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
483 {
484         u64 runtime = sched_rt_runtime(rt_rq);
485
486         if (rt_rq->rt_throttled)
487                 return rt_rq_throttled(rt_rq);
488
489         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
490                 return 0;
491
492         balance_runtime(rt_rq);
493         runtime = sched_rt_runtime(rt_rq);
494         if (runtime == RUNTIME_INF)
495                 return 0;
496
497         if (rt_rq->rt_time > runtime) {
498                 rt_rq->rt_throttled = 1;
499                 if (rt_rq_throttled(rt_rq)) {
500                         sched_rt_rq_dequeue(rt_rq);
501                         return 1;
502                 }
503         }
504
505         return 0;
506 }
507
508 /*
509  * Update the current task's runtime statistics. Skip current tasks that
510  * are not in our scheduling class.
511  */
512 static void update_curr_rt(struct rq *rq)
513 {
514         struct task_struct *curr = rq->curr;
515         struct sched_rt_entity *rt_se = &curr->rt;
516         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
517         u64 delta_exec;
518
519         if (!task_has_rt_policy(curr))
520                 return;
521
522         delta_exec = rq->clock - curr->se.exec_start;
523         if (unlikely((s64)delta_exec < 0))
524                 delta_exec = 0;
525
526         schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
527
528         curr->se.sum_exec_runtime += delta_exec;
529         account_group_exec_runtime(curr, delta_exec);
530
531         curr->se.exec_start = rq->clock;
532         cpuacct_charge(curr, delta_exec);
533
534         if (!rt_bandwidth_enabled())
535                 return;
536
537         for_each_sched_rt_entity(rt_se) {
538                 rt_rq = rt_rq_of_se(rt_se);
539
540                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
541                         spin_lock(&rt_rq->rt_runtime_lock);
542                         rt_rq->rt_time += delta_exec;
543                         if (sched_rt_runtime_exceeded(rt_rq))
544                                 resched_task(curr);
545                         spin_unlock(&rt_rq->rt_runtime_lock);
546                 }
547         }
548 }
549
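/*
 * Enqueue-side accounting: bump rt_nr_running, track the highest
 * priority (and publish it to cpupri on SMP), update the migratory and
 * boosted counts, and make sure the bandwidth timer is running.
 */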
550 static inline
551 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
552 {
553         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
554         rt_rq->rt_nr_running++;
555 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
556         if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
557 #ifdef CONFIG_SMP
558                 struct rq *rq = rq_of_rt_rq(rt_rq);
559 #endif
560
561                 rt_rq->highest_prio = rt_se_prio(rt_se);
562 #ifdef CONFIG_SMP
563                 if (rq->online)
564                         cpupri_set(&rq->rd->cpupri, rq->cpu,
565                                    rt_se_prio(rt_se));
566 #endif
567         }
568 #endif
569 #ifdef CONFIG_SMP
570         if (rt_se->nr_cpus_allowed > 1) {
571                 struct rq *rq = rq_of_rt_rq(rt_rq);
572
573                 rq->rt.rt_nr_migratory++;
574         }
575
576         update_rt_migration(rq_of_rt_rq(rt_rq));
577 #endif
578 #ifdef CONFIG_RT_GROUP_SCHED
579         if (rt_se_boosted(rt_se))
580                 rt_rq->rt_nr_boosted++;
581
582         if (rt_rq->tg)
583                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
584 #else
585         start_rt_bandwidth(&def_rt_bandwidth);
586 #endif
587 }
588
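/*
 * Dequeue-side accounting, the mirror image of inc_rt_tasks():
 * highest_prio is recomputed from the priority bitmap when the
 * top-priority entity leaves.
 */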
589 static inline
590 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
591 {
592 #ifdef CONFIG_SMP
593         int highest_prio = rt_rq->highest_prio;
594 #endif
595
596         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
597         WARN_ON(!rt_rq->rt_nr_running);
598         rt_rq->rt_nr_running--;
599 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
600         if (rt_rq->rt_nr_running) {
601                 struct rt_prio_array *array;
602
603                 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
604                 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
605                         /* recalculate */
606                         array = &rt_rq->active;
607                         rt_rq->highest_prio =
608                                 sched_find_first_bit(array->bitmap);
609                 } /* otherwise leave rt_rq->highest_prio alone */
610         } else
611                 rt_rq->highest_prio = MAX_RT_PRIO;
612 #endif
613 #ifdef CONFIG_SMP
614         if (rt_se->nr_cpus_allowed > 1) {
615                 struct rq *rq = rq_of_rt_rq(rt_rq);
616                 rq->rt.rt_nr_migratory--;
617         }
618
619         if (rt_rq->highest_prio != highest_prio) {
620                 struct rq *rq = rq_of_rt_rq(rt_rq);
621
622                 if (rq->online)
623                         cpupri_set(&rq->rd->cpupri, rq->cpu,
624                                    rt_rq->highest_prio);
625         }
626
627         update_rt_migration(rq_of_rt_rq(rt_rq));
628 #endif /* CONFIG_SMP */
629 #ifdef CONFIG_RT_GROUP_SCHED
630         if (rt_se_boosted(rt_se))
631                 rt_rq->rt_nr_boosted--;
632
633         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
634 #endif
635 }
636
637 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
638 {
639         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
640         struct rt_prio_array *array = &rt_rq->active;
641         struct rt_rq *group_rq = group_rt_rq(rt_se);
642         struct list_head *queue = array->queue + rt_se_prio(rt_se);
643
644         /*
645          * Don't enqueue the group if it's throttled, or when empty.
646          * The latter is a consequence of the former when a child group
647          * gets throttled and the current group doesn't have any other
648          * active members.
649          */
650         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
651                 return;
652
653         list_add_tail(&rt_se->run_list, queue);
654         __set_bit(rt_se_prio(rt_se), array->bitmap);
655
656         inc_rt_tasks(rt_se, rt_rq);
657 }
658
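/*
 * Remove the entity from its priority queue and clear the bitmap bit
 * if that priority level is now empty.
 */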
659 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
660 {
661         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
662         struct rt_prio_array *array = &rt_rq->active;
663
664         list_del_init(&rt_se->run_list);
665         if (list_empty(array->queue + rt_se_prio(rt_se)))
666                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
667
668         dec_rt_tasks(rt_se, rt_rq);
669 }
670
671 /*
672  * Because the prio of an upper entry depends on the lower
673  * entries, we must remove entries top-down.
674  */
675 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
676 {
677         struct sched_rt_entity *back = NULL;
678
679         for_each_sched_rt_entity(rt_se) {
680                 rt_se->back = back;
681                 back = rt_se;
682         }
683
684         for (rt_se = back; rt_se; rt_se = rt_se->back) {
685                 if (on_rt_rq(rt_se))
686                         __dequeue_rt_entity(rt_se);
687         }
688 }
689
690 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
691 {
692         dequeue_rt_stack(rt_se);
693         for_each_sched_rt_entity(rt_se)
694                 __enqueue_rt_entity(rt_se);
695 }
696
697 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
698 {
699         dequeue_rt_stack(rt_se);
700
701         for_each_sched_rt_entity(rt_se) {
702                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
703
704                 if (rt_rq && rt_rq->rt_nr_running)
705                         __enqueue_rt_entity(rt_se);
706         }
707 }
708
709 /*
710  * Adding/removing a task to/from a priority array:
711  */
712 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
713 {
714         struct sched_rt_entity *rt_se = &p->rt;
715
716         if (wakeup)
717                 rt_se->timeout = 0;
718
719         enqueue_rt_entity(rt_se);
720
721         inc_cpu_load(rq, p->se.load.weight);
722 }
723
724 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
725 {
726         struct sched_rt_entity *rt_se = &p->rt;
727
728         update_curr_rt(rq);
729         dequeue_rt_entity(rt_se);
730
731         dec_cpu_load(rq, p->se.load.weight);
732 }
733
734 /*
735  * Put a task at the end of the run list without the overhead of a dequeue
736  * followed by an enqueue.
737  */
738 static void
739 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
740 {
741         if (on_rt_rq(rt_se)) {
742                 struct rt_prio_array *array = &rt_rq->active;
743                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
744
745                 if (head)
746                         list_move(&rt_se->run_list, queue);
747                 else
748                         list_move_tail(&rt_se->run_list, queue);
749         }
750 }
751
752 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
753 {
754         struct sched_rt_entity *rt_se = &p->rt;
755         struct rt_rq *rt_rq;
756
757         for_each_sched_rt_entity(rt_se) {
758                 rt_rq = rt_rq_of_se(rt_se);
759                 requeue_rt_entity(rt_rq, rt_se, head);
760         }
761 }
762
763 static void yield_task_rt(struct rq *rq)
764 {
765         requeue_task_rt(rq, rq->curr, 0);
766 }
767
768 #ifdef CONFIG_SMP
769 static int find_lowest_rq(struct task_struct *task);
770
771 static int select_task_rq_rt(struct task_struct *p, int sync)
772 {
773         struct rq *rq = task_rq(p);
774
775         /*
776          * If the current task is an RT task, then
777          * try to see if we can wake this RT task up on another
778          * runqueue. Otherwise simply start this RT task
779          * on its current runqueue.
780          *
781          * We want to avoid overloading runqueues, even if the woken
782          * RT task is of higher priority than the current RT task.
783          * RT tasks behave differently from other tasks: if one gets
784          * preempted, we try to push it off to another queue. So
785          * trying to keep a preempting RT task on the same cache-hot
786          * CPU would force the running RT task onto a cold CPU. We
787          * would rather sacrifice the cache of the lower-priority RT
788          * task in the hope of saving some cache for the RT task
789          * that is just being woken and will probably have a cold
790          * cache anyway.
791          */
792         if (unlikely(rt_task(rq->curr)) &&
793             (p->rt.nr_cpus_allowed > 1)) {
794                 int cpu = find_lowest_rq(p);
795
796                 return (cpu == -1) ? task_cpu(p) : cpu;
797         }
798
799         /*
800          * Otherwise, just let it ride on the affined RQ and the
801          * post-schedule router will push the preempted task away
802          */
803         return task_cpu(p);
804 }
805
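/*
 * Equal-priority wakeup: if the woken task is pinned to this cpu but
 * current could run elsewhere, requeue the woken task at the head and
 * reschedule so the push logic can move current away.
 */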
806 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
807 {
808         cpumask_t mask;
809
810         if (rq->curr->rt.nr_cpus_allowed == 1)
811                 return;
812
813         if (p->rt.nr_cpus_allowed != 1
814             && cpupri_find(&rq->rd->cpupri, p, &mask))
815                 return;
816
817         if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
818                 return;
819
820         /*
821          * There appear to be other CPUs that can accept
822          * current and none to run 'p', so let's reschedule
823          * to try and push current away:
824          */
825         requeue_task_rt(rq, p, 1);
826         resched_task(rq->curr);
827 }
828
829 #endif /* CONFIG_SMP */
830
831 /*
832  * Preempt the current task with a newly woken task if needed:
833  */
834 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
835 {
836         if (p->prio < rq->curr->prio) {
837                 resched_task(rq->curr);
838                 return;
839         }
840
841 #ifdef CONFIG_SMP
842         /*
843          * If:
844          *
845          * - the newly woken task is of equal priority to the current task
846          * - the newly woken task is non-migratable while current is migratable
847          * - current will be preempted on the next reschedule
848          *
849          * we should check to see if current can readily move to a different
850          * cpu.  If so, we will reschedule to allow the push logic to try
851          * to move current somewhere else, making room for our non-migratable
852          * task.
853          */
854         if (p->prio == rq->curr->prio && !need_resched())
855                 check_preempt_equal_prio(rq, p);
856 #endif
857 }
858
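/*
 * Pick the highest-priority entity on this rt_rq: the first set bit in
 * the priority bitmap names the list to take the head of.
 */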
859 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
860                                                    struct rt_rq *rt_rq)
861 {
862         struct rt_prio_array *array = &rt_rq->active;
863         struct sched_rt_entity *next = NULL;
864         struct list_head *queue;
865         int idx;
866
867         idx = sched_find_first_bit(array->bitmap);
868         BUG_ON(idx >= MAX_RT_PRIO);
869
870         queue = array->queue + idx;
871         next = list_entry(queue->next, struct sched_rt_entity, run_list);
872
873         return next;
874 }
875
876 static struct task_struct *pick_next_task_rt(struct rq *rq)
877 {
878         struct sched_rt_entity *rt_se;
879         struct task_struct *p;
880         struct rt_rq *rt_rq;
881
882         rt_rq = &rq->rt;
883
884         if (unlikely(!rt_rq->rt_nr_running))
885                 return NULL;
886
887         if (rt_rq_throttled(rt_rq))
888                 return NULL;
889
890         do {
891                 rt_se = pick_next_rt_entity(rq, rt_rq);
892                 BUG_ON(!rt_se);
893                 rt_rq = group_rt_rq(rt_se);
894         } while (rt_rq);
895
896         p = rt_task_of(rt_se);
897         p->se.exec_start = rq->clock;
898         return p;
899 }
900
901 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
902 {
903         update_curr_rt(rq);
904         p->se.exec_start = 0;
905 }
906
907 #ifdef CONFIG_SMP
908
909 /* Only try algorithms three times */
910 #define RT_MAX_TRIES 3
911
912 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
913 static inline void double_unlock_balance(struct rq *this_rq,
914                                                 struct rq *busiest);
915
916 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
917
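/*
 * A task is a candidate for pushing/pulling if it isn't currently
 * running, is allowed on @cpu (or on any cpu when @cpu < 0), and may
 * run on more than one cpu.
 */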
918 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
919 {
920         if (!task_running(rq, p) &&
921             (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
922             (p->rt.nr_cpus_allowed > 1))
923                 return 1;
924         return 0;
925 }
926
927 /* Return the second highest RT task, NULL otherwise */
928 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
929 {
930         struct task_struct *next = NULL;
931         struct sched_rt_entity *rt_se;
932         struct rt_prio_array *array;
933         struct rt_rq *rt_rq;
934         int idx;
935
936         for_each_leaf_rt_rq(rt_rq, rq) {
937                 array = &rt_rq->active;
938                 idx = sched_find_first_bit(array->bitmap);
939  next_idx:
940                 if (idx >= MAX_RT_PRIO)
941                         continue;
942                 if (next && next->prio < idx)
943                         continue;
944                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
945                         struct task_struct *p = rt_task_of(rt_se);
946                         if (pick_rt_task(rq, p, cpu)) {
947                                 next = p;
948                                 break;
949                         }
950                 }
951                 if (!next) {
952                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
953                         goto next_idx;
954                 }
955         }
956
957         return next;
958 }
959
960 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
961
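/*
 * Choose from @mask: prefer @this_cpu when it is in the mask (cheapest
 * to preempt), otherwise the first cpu in the mask, or -1 if empty.
 */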
962 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
963 {
964         int first;
965
966         /* "this_cpu" is cheaper to preempt than a remote processor */
967         if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
968                 return this_cpu;
969
970         first = first_cpu(*mask);
971         if (first != NR_CPUS)
972                 return first;
973
974         return -1;
975 }
976
977 static int find_lowest_rq(struct task_struct *task)
978 {
979         struct sched_domain *sd;
980         cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
981         int this_cpu = smp_processor_id();
982         int cpu      = task_cpu(task);
983
984         if (task->rt.nr_cpus_allowed == 1)
985                 return -1; /* No other targets possible */
986
987         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
988                 return -1; /* No targets found */
989
990         /*
991          * Only consider CPUs that are usable for migration.
992          * I guess we might want to change cpupri_find() to ignore those
993          * in the first place.
994          */
995         cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
996
997         /*
998          * At this point we have built a mask of cpus representing the
999          * lowest priority tasks in the system.  Now we want to elect
1000          * the best one based on our affinity and topology.
1001          *
1002          * We prioritize the last cpu that the task executed on since
1003          * it is most likely cache-hot in that location.
1004          */
1005         if (cpu_isset(cpu, *lowest_mask))
1006                 return cpu;
1007
1008         /*
1009          * Otherwise, we consult the sched_domains span maps to figure
1010          * out which cpu is logically closest to our hot cache data.
1011          */
1012         if (this_cpu == cpu)
1013                 this_cpu = -1; /* Skip this_cpu opt if the same */
1014
1015         for_each_domain(cpu, sd) {
1016                 if (sd->flags & SD_WAKE_AFFINE) {
1017                         cpumask_t domain_mask;
1018                         int       best_cpu;
1019
1020                         cpumask_and(&domain_mask, sched_domain_span(sd),
1021                                     lowest_mask);
1022
1023                         best_cpu = pick_optimal_cpu(this_cpu,
1024                                                     &domain_mask);
1025                         if (best_cpu != -1)
1026                                 return best_cpu;
1027                 }
1028         }
1029
1030         /*
1031          * And finally, if there were no matches within the domains
1032          * just give the caller *something* to work with from the compatible
1033          * locations.
1034          */
1035         return pick_optimal_cpu(this_cpu, lowest_mask);
1036 }
1037
1038 /* Will lock the rq it finds */
1039 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1040 {
1041         struct rq *lowest_rq = NULL;
1042         int tries;
1043         int cpu;
1044
1045         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1046                 cpu = find_lowest_rq(task);
1047
1048                 if ((cpu == -1) || (cpu == rq->cpu))
1049                         break;
1050
1051                 lowest_rq = cpu_rq(cpu);
1052
1053                 /* if the prio of this runqueue changed, try again */
1054                 if (double_lock_balance(rq, lowest_rq)) {
1055                         /*
1056                          * We had to unlock the run queue. In
1057                          * the meantime, the task could have
1058                          * migrated already or had its affinity changed.
1059                          * Also make sure that it wasn't scheduled on its rq.
1060                          */
1061                         if (unlikely(task_rq(task) != rq ||
1062                                      !cpu_isset(lowest_rq->cpu,
1063                                                 task->cpus_allowed) ||
1064                                      task_running(rq, task) ||
1065                                      !task->se.on_rq)) {
1066
1067                                 spin_unlock(&lowest_rq->lock);
1068                                 lowest_rq = NULL;
1069                                 break;
1070                         }
1071                 }
1072
1073                 /* If this rq is still suitable use it. */
1074                 if (lowest_rq->rt.highest_prio > task->prio)
1075                         break;
1076
1077                 /* try again */
1078                 double_unlock_balance(rq, lowest_rq);
1079                 lowest_rq = NULL;
1080         }
1081
1082         return lowest_rq;
1083 }
1084
1085 /*
1086  * If the current CPU has more than one RT task, see if a
1087  * non-running task can migrate over to a CPU that is running a task
1088  * of lesser priority.
1089  */
1090 static int push_rt_task(struct rq *rq)
1091 {
1092         struct task_struct *next_task;
1093         struct rq *lowest_rq;
1094         int ret = 0;
1095         int paranoid = RT_MAX_TRIES;
1096
1097         if (!rq->rt.overloaded)
1098                 return 0;
1099
1100         next_task = pick_next_highest_task_rt(rq, -1);
1101         if (!next_task)
1102                 return 0;
1103
1104  retry:
1105         if (unlikely(next_task == rq->curr)) {
1106                 WARN_ON(1);
1107                 return 0;
1108         }
1109
1110         /*
1111          * It's possible that next_task slipped in with a
1112          * higher priority than current. If that's the case,
1113          * just reschedule current.
1114          */
1115         if (unlikely(next_task->prio < rq->curr->prio)) {
1116                 resched_task(rq->curr);
1117                 return 0;
1118         }
1119
1120         /* We might release rq lock */
1121         get_task_struct(next_task);
1122
1123         /* find_lock_lowest_rq locks the rq if found */
1124         lowest_rq = find_lock_lowest_rq(next_task, rq);
1125         if (!lowest_rq) {
1126                 struct task_struct *task;
1127                 /*
1128                  * find_lock_lowest_rq() releases rq->lock,
1129                  * so it is possible that next_task has changed.
1130                  * If it has, then try again.
1131                  */
1132                 task = pick_next_highest_task_rt(rq, -1);
1133                 if (unlikely(task != next_task) && task && paranoid--) {
1134                         put_task_struct(next_task);
1135                         next_task = task;
1136                         goto retry;
1137                 }
1138                 goto out;
1139         }
1140
1141         deactivate_task(rq, next_task, 0);
1142         set_task_cpu(next_task, lowest_rq->cpu);
1143         activate_task(lowest_rq, next_task, 0);
1144
1145         resched_task(lowest_rq->curr);
1146
1147         double_unlock_balance(rq, lowest_rq);
1148
1149         ret = 1;
1150 out:
1151         put_task_struct(next_task);
1152
1153         return ret;
1154 }
1155
1156 /*
1157  * TODO: Currently we just use the second highest prio task on
1158  *       the queue, and stop when it can't migrate (or there are
1159  *       no more RT tasks).  There may be a case where a lower
1160  *       priority RT task has a different affinity than the
1161  *       higher RT task. In this case the lower RT task could
1162  *       possibly be able to migrate, whereas the higher-priority
1163  *       RT task could not.  We currently ignore this issue.
1164  *       Enhancements are welcome!
1165  */
1166 static void push_rt_tasks(struct rq *rq)
1167 {
1168         /* push_rt_task() will return true if it moved an RT task */
1169         while (push_rt_task(rq))
1170                 ;
1171 }
1172
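/*
 * Scan the overloaded runqueues in our root domain and pull over any RT
 * task that would preempt what we are about to run locally.  Returns
 * non-zero when a task was pulled or when the candidate next task
 * changed while locks were dropped.
 */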
1173 static int pull_rt_task(struct rq *this_rq)
1174 {
1175         int this_cpu = this_rq->cpu, ret = 0, cpu;
1176         struct task_struct *p, *next;
1177         struct rq *src_rq;
1178
1179         if (likely(!rt_overloaded(this_rq)))
1180                 return 0;
1181
1182         next = pick_next_task_rt(this_rq);
1183
1184         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1185                 if (this_cpu == cpu)
1186                         continue;
1187
1188                 src_rq = cpu_rq(cpu);
1189                 /*
1190                  * We can potentially drop this_rq's lock in
1191                  * double_lock_balance, and another CPU could
1192                  * steal our next task - hence we must cause
1193                  * the caller to recalculate the next task
1194                  * in that case:
1195                  */
1196                 if (double_lock_balance(this_rq, src_rq)) {
1197                         struct task_struct *old_next = next;
1198
1199                         next = pick_next_task_rt(this_rq);
1200                         if (next != old_next)
1201                                 ret = 1;
1202                 }
1203
1204                 /*
1205                  * Are there still pullable RT tasks?
1206                  */
1207                 if (src_rq->rt.rt_nr_running <= 1)
1208                         goto skip;
1209
1210                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1211
1212                 /*
1213                  * Do we have an RT task that preempts
1214                  * the to-be-scheduled task?
1215                  */
1216                 if (p && (!next || (p->prio < next->prio))) {
1217                         WARN_ON(p == src_rq->curr);
1218                         WARN_ON(!p->se.on_rq);
1219
1220                         /*
1221                          * There's a chance that p is higher in priority
1222                          * than what's currently running on its cpu.
1223                          * This is just because p is waking up and hasn't
1224                          * had a chance to schedule. We only pull
1225                          * p if it is lower in priority than the
1226                          * current task on the run queue or
1227                          * this_rq next task is lower in prio than
1228                          * the current task on that rq.
1229                          */
1230                         if (p->prio < src_rq->curr->prio ||
1231                             (next && next->prio < src_rq->curr->prio))
1232                                 goto skip;
1233
1234                         ret = 1;
1235
1236                         deactivate_task(src_rq, p, 0);
1237                         set_task_cpu(p, this_cpu);
1238                         activate_task(this_rq, p, 0);
1239                         /*
1240                          * We continue with the search, just in
1241                          * case there's an even higher prio task
1242                          * in another runqueue. (low likelihood
1243                          * but possible)
1244                          *
1245                          * Update next so that we won't pick a task
1246                          * on another cpu with a priority lower (or equal)
1247                          * than the one we just picked.
1248                          */
1249                         next = p;
1250
1251                 }
1252  skip:
1253                 double_unlock_balance(this_rq, src_rq);
1254         }
1255
1256         return ret;
1257 }
1258
1259 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1260 {
1261         /* Try to pull RT tasks here if we lower this rq's prio */
1262         if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
1263                 pull_rt_task(rq);
1264 }
1265
1266 static void post_schedule_rt(struct rq *rq)
1267 {
1268         /*
1269          * If we have more than one rt_task queued, then
1270          * see if we can push the other rt_tasks off to other CPUs.
1271          * Note we may release the rq lock, and since
1272          * the lock was owned by prev, we need to release it
1273          * first via finish_lock_switch() and then reacquire it here.
1274          */
1275         if (unlikely(rq->rt.overloaded)) {
1276                 spin_lock_irq(&rq->lock);
1277                 push_rt_tasks(rq);
1278                 spin_unlock_irq(&rq->lock);
1279         }
1280 }
1281
1282 /*
1283  * If we are not running and we are not going to reschedule soon, we should
1284  * try to push tasks away now
1285  */
1286 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1287 {
1288         if (!task_running(rq, p) &&
1289             !test_tsk_need_resched(rq->curr) &&
1290             rq->rt.overloaded)
1291                 push_rt_tasks(rq);
1292 }
1293
1294 static unsigned long
1295 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1296                 unsigned long max_load_move,
1297                 struct sched_domain *sd, enum cpu_idle_type idle,
1298                 int *all_pinned, int *this_best_prio)
1299 {
1300         /* don't touch RT tasks */
1301         return 0;
1302 }
1303
1304 static int
1305 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1306                  struct sched_domain *sd, enum cpu_idle_type idle)
1307 {
1308         /* don't touch RT tasks */
1309         return 0;
1310 }
1311
1312 static void set_cpus_allowed_rt(struct task_struct *p,
1313                                 const cpumask_t *new_mask)
1314 {
1315         int weight = cpus_weight(*new_mask);
1316
1317         BUG_ON(!rt_task(p));
1318
1319         /*
1320          * Update the migration status of the RQ if we have an RT task
1321          * which is running AND changing its weight value.
1322          */
1323         if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1324                 struct rq *rq = task_rq(p);
1325
1326                 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1327                         rq->rt.rt_nr_migratory++;
1328                 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1329                         BUG_ON(!rq->rt.rt_nr_migratory);
1330                         rq->rt.rt_nr_migratory--;
1331                 }
1332
1333                 update_rt_migration(rq);
1334         }
1335
1336         p->cpus_allowed    = *new_mask;
1337         p->rt.nr_cpus_allowed = weight;
1338 }
1339
1340 /* Assumes rq->lock is held */
1341 static void rq_online_rt(struct rq *rq)
1342 {
1343         if (rq->rt.overloaded)
1344                 rt_set_overload(rq);
1345
1346         __enable_runtime(rq);
1347
1348         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
1349 }
1350
1351 /* Assumes rq->lock is held */
1352 static void rq_offline_rt(struct rq *rq)
1353 {
1354         if (rq->rt.overloaded)
1355                 rt_clear_overload(rq);
1356
1357         __disable_runtime(rq);
1358
1359         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1360 }
1361
1362 /*
1363  * When switching away from the rt queue, we bring ourselves to a position
1364  * where we might want to pull RT tasks from other runqueues.
1365  */
1366 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1367                            int running)
1368 {
1369         /*
1370          * If there are other RT tasks then we will reschedule
1371          * and the scheduling of the other RT tasks will handle
1372          * the balancing. But if we are the last RT task
1373          * we may need to handle the pulling of RT tasks
1374          * now.
1375          */
1376         if (!rq->rt.rt_nr_running)
1377                 pull_rt_task(rq);
1378 }
1379 #endif /* CONFIG_SMP */
1380
1381 /*
1382  * When switching a task to RT, we may overload the runqueue
1383  * with RT tasks. In this case we try to push them off to
1384  * other runqueues.
1385  */
1386 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1387                            int running)
1388 {
1389         int check_resched = 1;
1390
1391         /*
1392          * If we are already running, then there's nothing
1393          * that needs to be done. But if we are not running
1394          * we may need to preempt the current running task.
1395          * If that current running task is also an RT task
1396          * then see if we can move to another run queue.
1397          */
1398         if (!running) {
1399 #ifdef CONFIG_SMP
1400                 if (rq->rt.overloaded && push_rt_task(rq) &&
1401                     /* Don't resched if we changed runqueues */
1402                     rq != task_rq(p))
1403                         check_resched = 0;
1404 #endif /* CONFIG_SMP */
1405                 if (check_resched && p->prio < rq->curr->prio)
1406                         resched_task(rq->curr);
1407         }
1408 }
1409
1410 /*
1411  * Priority of the task has changed. This may cause
1412  * us to initiate a push or pull.
1413  */
1414 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1415                             int oldprio, int running)
1416 {
1417         if (running) {
1418 #ifdef CONFIG_SMP
1419                 /*
1420                  * If our priority decreases while running, we
1421                  * may need to pull tasks to this runqueue.
1422                  */
1423                 if (oldprio < p->prio)
1424                         pull_rt_task(rq);
1425                 /*
1426                  * If there's a higher priority task waiting to run
1427                  * then reschedule. Note, the above pull_rt_task
1428                  * can release the rq lock and p could migrate.
1429                  * Only reschedule if p is still on the same runqueue.
1430                  */
1431                 if (p->prio > rq->rt.highest_prio && rq->curr == p)
1432                         resched_task(p);
1433 #else
1434                 /* For UP simply resched on drop of prio */
1435                 if (oldprio < p->prio)
1436                         resched_task(p);
1437 #endif /* CONFIG_SMP */
1438         } else {
1439                 /*
1440                  * This task is not running, but if its priority
1441                  * is higher than that of the currently running
1442                  * task, then reschedule.
1443                  */
1444                 if (p->prio < rq->curr->prio)
1445                         resched_task(rq->curr);
1446         }
1447 }
1448
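/*
 * RLIMIT_RTTIME enforcement: p->rt.timeout counts the ticks this task
 * has been running; once past the soft limit (converted from usecs to
 * ticks) we set cputime_expires.sched_exp so the limit is acted upon.
 */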
1449 static void watchdog(struct rq *rq, struct task_struct *p)
1450 {
1451         unsigned long soft, hard;
1452
1453         if (!p->signal)
1454                 return;
1455
1456         soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1457         hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1458
1459         if (soft != RLIM_INFINITY) {
1460                 unsigned long next;
1461
1462                 p->rt.timeout++;
1463                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1464                 if (p->rt.timeout > next)
1465                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1466         }
1467 }
1468
1469 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1470 {
1471         update_curr_rt(rq);
1472
1473         watchdog(rq, p);
1474
1475         /*
1476          * RR tasks need a special form of timeslice management.
1477          * FIFO tasks have no timeslices.
1478          */
1479         if (p->policy != SCHED_RR)
1480                 return;
1481
1482         if (--p->rt.time_slice)
1483                 return;
1484
1485         p->rt.time_slice = DEF_TIMESLICE;
1486
1487         /*
1488          * Requeue to the end of queue if we are not the only element
1489          * on the queue:
1490          */
1491         if (p->rt.run_list.prev != p->rt.run_list.next) {
1492                 requeue_task_rt(rq, p, 0);
1493                 set_tsk_need_resched(p);
1494         }
1495 }
1496
1497 static void set_curr_task_rt(struct rq *rq)
1498 {
1499         struct task_struct *p = rq->curr;
1500
1501         p->se.exec_start = rq->clock;
1502 }
1503
1504 static const struct sched_class rt_sched_class = {
1505         .next                   = &fair_sched_class,
1506         .enqueue_task           = enqueue_task_rt,
1507         .dequeue_task           = dequeue_task_rt,
1508         .yield_task             = yield_task_rt,
1509
1510         .check_preempt_curr     = check_preempt_curr_rt,
1511
1512         .pick_next_task         = pick_next_task_rt,
1513         .put_prev_task          = put_prev_task_rt,
1514
1515 #ifdef CONFIG_SMP
1516         .select_task_rq         = select_task_rq_rt,
1517
1518         .load_balance           = load_balance_rt,
1519         .move_one_task          = move_one_task_rt,
1520         .set_cpus_allowed       = set_cpus_allowed_rt,
1521         .rq_online              = rq_online_rt,
1522         .rq_offline             = rq_offline_rt,
1523         .pre_schedule           = pre_schedule_rt,
1524         .post_schedule          = post_schedule_rt,
1525         .task_wake_up           = task_wake_up_rt,
1526         .switched_from          = switched_from_rt,
1527 #endif
1528
1529         .set_curr_task          = set_curr_task_rt,
1530         .task_tick              = task_tick_rt,
1531
1532         .prio_changed           = prio_changed_rt,
1533         .switched_to            = switched_to_rt,
1534 };
1535
1536 #ifdef CONFIG_SCHED_DEBUG
1537 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1538
1539 static void print_rt_stats(struct seq_file *m, int cpu)
1540 {
1541         struct rt_rq *rt_rq;
1542
1543         rcu_read_lock();
1544         for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1545                 print_rt_rq(m, cpu, rt_rq);
1546         rcu_read_unlock();
1547 }
1548 #endif /* CONFIG_SCHED_DEBUG */