sched: cleanup inc/dec_rt_tasks
kernel/sched_rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #ifdef CONFIG_SMP
7
8 static inline int rt_overloaded(struct rq *rq)
9 {
10         return atomic_read(&rq->rd->rto_count);
11 }
12
13 static inline void rt_set_overload(struct rq *rq)
14 {
15         if (!rq->online)
16                 return;
17
18         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
19         /*
20          * Make sure the mask is visible before we set
21          * the overload count. That is checked to determine
22          * if we should look at the mask. It would be a shame
23          * if we looked at the mask, but the mask was not
24          * updated yet.
25          */
26         wmb();
27         atomic_inc(&rq->rd->rto_count);
28 }
29
30 static inline void rt_clear_overload(struct rq *rq)
31 {
32         if (!rq->online)
33                 return;
34
35         /* the order here really doesn't matter */
36         atomic_dec(&rq->rd->rto_count);
37         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
38 }
39
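/*
 * Keep the root domain's overload state in sync with this runqueue:
 * mark it overloaded while it has more than one runnable RT task and
 * at least one of them can migrate, clear the flag otherwise.
 */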
40 static void update_rt_migration(struct rq *rq)
41 {
42         if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
43                 if (!rq->rt.overloaded) {
44                         rt_set_overload(rq);
45                         rq->rt.overloaded = 1;
46                 }
47         } else if (rq->rt.overloaded) {
48                 rt_clear_overload(rq);
49                 rq->rt.overloaded = 0;
50         }
51 }
52 #endif /* CONFIG_SMP */
53
54 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
55 {
56         return container_of(rt_se, struct task_struct, rt);
57 }
58
59 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60 {
61         return !list_empty(&rt_se->run_list);
62 }
63
64 #ifdef CONFIG_RT_GROUP_SCHED
65
66 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
67 {
68         if (!rt_rq->tg)
69                 return RUNTIME_INF;
70
71         return rt_rq->rt_runtime;
72 }
73
74 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
75 {
76         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
77 }
78
79 #define for_each_leaf_rt_rq(rt_rq, rq) \
80         list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81
82 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83 {
84         return rt_rq->rq;
85 }
86
87 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88 {
89         return rt_se->rt_rq;
90 }
91
92 #define for_each_sched_rt_entity(rt_se) \
93         for (; rt_se; rt_se = rt_se->parent)
94
95 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
96 {
97         return rt_se->my_q;
98 }
99
100 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
101 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
102
103 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
104 {
105         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
106         struct sched_rt_entity *rt_se = rt_rq->rt_se;
107
108         if (rt_rq->rt_nr_running) {
109                 if (rt_se && !on_rt_rq(rt_se))
110                         enqueue_rt_entity(rt_se);
111                 if (rt_rq->highest_prio < curr->prio)
112                         resched_task(curr);
113         }
114 }
115
116 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
117 {
118         struct sched_rt_entity *rt_se = rt_rq->rt_se;
119
120         if (rt_se && on_rt_rq(rt_se))
121                 dequeue_rt_entity(rt_se);
122 }
123
124 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
125 {
126         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
127 }
128
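/*
 * An entity is "boosted" when it runs above its normal priority due to
 * priority inheritance; for a group entity this means its queue holds
 * at least one boosted task.
 */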
129 static int rt_se_boosted(struct sched_rt_entity *rt_se)
130 {
131         struct rt_rq *rt_rq = group_rt_rq(rt_se);
132         struct task_struct *p;
133
134         if (rt_rq)
135                 return !!rt_rq->rt_nr_boosted;
136
137         p = rt_task_of(rt_se);
138         return p->prio != p->normal_prio;
139 }
140
141 #ifdef CONFIG_SMP
142 static inline const struct cpumask *sched_rt_period_mask(void)
143 {
144         return cpu_rq(smp_processor_id())->rd->span;
145 }
146 #else
147 static inline const struct cpumask *sched_rt_period_mask(void)
148 {
149         return cpu_online_mask;
150 }
151 #endif
152
153 static inline
154 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
155 {
156         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
157 }
158
159 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
160 {
161         return &rt_rq->tg->rt_bandwidth;
162 }
163
164 #else /* !CONFIG_RT_GROUP_SCHED */
165
166 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
167 {
168         return rt_rq->rt_runtime;
169 }
170
171 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
172 {
173         return ktime_to_ns(def_rt_bandwidth.rt_period);
174 }
175
176 #define for_each_leaf_rt_rq(rt_rq, rq) \
177         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180 {
181         return container_of(rt_rq, struct rq, rt);
182 }
183
184 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185 {
186         struct task_struct *p = rt_task_of(rt_se);
187         struct rq *rq = task_rq(p);
188
189         return &rq->rt;
190 }
191
192 #define for_each_sched_rt_entity(rt_se) \
193         for (; rt_se; rt_se = NULL)
194
195 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
196 {
197         return NULL;
198 }
199
200 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
201 {
202         if (rt_rq->rt_nr_running)
203                 resched_task(rq_of_rt_rq(rt_rq)->curr);
204 }
205
206 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
207 {
208 }
209
210 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
211 {
212         return rt_rq->rt_throttled;
213 }
214
215 static inline const struct cpumask *sched_rt_period_mask(void)
216 {
217         return cpu_online_mask;
218 }
219
220 static inline
221 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
222 {
223         return &cpu_rq(cpu)->rt;
224 }
225
226 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
227 {
228         return &def_rt_bandwidth;
229 }
230
231 #endif /* CONFIG_RT_GROUP_SCHED */
232
233 #ifdef CONFIG_SMP
234 /*
235  * We ran out of runtime; see if we can borrow some from our neighbours.
236  */
237 static int do_balance_runtime(struct rt_rq *rt_rq)
238 {
239         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
240         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
241         int i, weight, more = 0;
242         u64 rt_period;
243
244         weight = cpumask_weight(rd->span);
245
246         spin_lock(&rt_b->rt_runtime_lock);
247         rt_period = ktime_to_ns(rt_b->rt_period);
248         for_each_cpu(i, rd->span) {
249                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
250                 s64 diff;
251
252                 if (iter == rt_rq)
253                         continue;
254
255                 spin_lock(&iter->rt_runtime_lock);
256                 /*
257                  * Either all rqs have inf runtime and there's nothing to steal
258                  * or __disable_runtime() below sets a specific rq to inf to
259                  * indicate it's been disabled and disallow stealing.
260                  */
261                 if (iter->rt_runtime == RUNTIME_INF)
262                         goto next;
263
264                 /*
265                  * From runqueues with spare time, take 1/n part of their
266                  * spare time, but no more than our period.
267                  */
268                 diff = iter->rt_runtime - iter->rt_time;
269                 if (diff > 0) {
270                         diff = div_u64((u64)diff, weight);
271                         if (rt_rq->rt_runtime + diff > rt_period)
272                                 diff = rt_period - rt_rq->rt_runtime;
273                         iter->rt_runtime -= diff;
274                         rt_rq->rt_runtime += diff;
275                         more = 1;
276                         if (rt_rq->rt_runtime == rt_period) {
277                                 spin_unlock(&iter->rt_runtime_lock);
278                                 break;
279                         }
280                 }
281 next:
282                 spin_unlock(&iter->rt_runtime_lock);
283         }
284         spin_unlock(&rt_b->rt_runtime_lock);
285
286         return more;
287 }
288
289 /*
290  * Ensure this RQ takes back all the runtime it lent to its neighbours.
291  */
292 static void __disable_runtime(struct rq *rq)
293 {
294         struct root_domain *rd = rq->rd;
295         struct rt_rq *rt_rq;
296
297         if (unlikely(!scheduler_running))
298                 return;
299
300         for_each_leaf_rt_rq(rt_rq, rq) {
301                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
302                 s64 want;
303                 int i;
304
305                 spin_lock(&rt_b->rt_runtime_lock);
306                 spin_lock(&rt_rq->rt_runtime_lock);
307                 /*
308                  * Either we're all inf and nobody needs to borrow, or we're
309                  * already disabled and thus have nothing to do, or we have
310                  * exactly the right amount of runtime to take out.
311                  */
312                 if (rt_rq->rt_runtime == RUNTIME_INF ||
313                                 rt_rq->rt_runtime == rt_b->rt_runtime)
314                         goto balanced;
315                 spin_unlock(&rt_rq->rt_runtime_lock);
316
317                 /*
318                  * Calculate the difference between what we started out with
319                  * and what we currently have; that's the amount of runtime
320                  * we lent out and now have to reclaim.
321                  */
322                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
323
324                 /*
325                  * Greedy reclaim, take back as much as we can.
326                  */
327                 for_each_cpu(i, rd->span) {
328                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
329                         s64 diff;
330
331                         /*
332                          * Can't reclaim from ourselves or disabled runqueues.
333                          */
334                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
335                                 continue;
336
337                         spin_lock(&iter->rt_runtime_lock);
338                         if (want > 0) {
339                                 diff = min_t(s64, iter->rt_runtime, want);
340                                 iter->rt_runtime -= diff;
341                                 want -= diff;
342                         } else {
343                                 iter->rt_runtime -= want;
344                                 want -= want;
345                         }
346                         spin_unlock(&iter->rt_runtime_lock);
347
348                         if (!want)
349                                 break;
350                 }
351
352                 spin_lock(&rt_rq->rt_runtime_lock);
353                 /*
354                  * We cannot be left wanting - that would mean some runtime
355                  * leaked out of the system.
356                  */
357                 BUG_ON(want);
358 balanced:
359                 /*
360                  * Disable all the borrow logic by pretending we have inf
361                  * runtime - in which case borrowing doesn't make sense.
362                  */
363                 rt_rq->rt_runtime = RUNTIME_INF;
364                 spin_unlock(&rt_rq->rt_runtime_lock);
365                 spin_unlock(&rt_b->rt_runtime_lock);
366         }
367 }
368
369 static void disable_runtime(struct rq *rq)
370 {
371         unsigned long flags;
372
373         spin_lock_irqsave(&rq->lock, flags);
374         __disable_runtime(rq);
375         spin_unlock_irqrestore(&rq->lock, flags);
376 }
377
378 static void __enable_runtime(struct rq *rq)
379 {
380         struct rt_rq *rt_rq;
381
382         if (unlikely(!scheduler_running))
383                 return;
384
385         /*
386          * Reset each runqueue's bandwidth settings
387          */
388         for_each_leaf_rt_rq(rt_rq, rq) {
389                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
390
391                 spin_lock(&rt_b->rt_runtime_lock);
392                 spin_lock(&rt_rq->rt_runtime_lock);
393                 rt_rq->rt_runtime = rt_b->rt_runtime;
394                 rt_rq->rt_time = 0;
395                 rt_rq->rt_throttled = 0;
396                 spin_unlock(&rt_rq->rt_runtime_lock);
397                 spin_unlock(&rt_b->rt_runtime_lock);
398         }
399 }
400
401 static void enable_runtime(struct rq *rq)
402 {
403         unsigned long flags;
404
405         spin_lock_irqsave(&rq->lock, flags);
406         __enable_runtime(rq);
407         spin_unlock_irqrestore(&rq->lock, flags);
408 }
409
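/*
 * Called with rt_rq->rt_runtime_lock held.  Only try to borrow when we
 * have actually overrun our runtime, and drop the lock around
 * do_balance_runtime() since that takes the bandwidth lock first.
 */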
410 static int balance_runtime(struct rt_rq *rt_rq)
411 {
412         int more = 0;
413
414         if (rt_rq->rt_time > rt_rq->rt_runtime) {
415                 spin_unlock(&rt_rq->rt_runtime_lock);
416                 more = do_balance_runtime(rt_rq);
417                 spin_lock(&rt_rq->rt_runtime_lock);
418         }
419
420         return more;
421 }
422 #else /* !CONFIG_SMP */
423 static inline int balance_runtime(struct rt_rq *rt_rq)
424 {
425         return 0;
426 }
427 #endif /* CONFIG_SMP */
428
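/*
 * Periodic runtime replenishment, driven by the rt_bandwidth timer: pay
 * back up to 'overrun' periods worth of consumed runtime on every rt_rq
 * in the period mask, unthrottle queues that are below their limit again,
 * and return 1 when everything is idle so the caller can stop the timer.
 */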
429 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
430 {
431         int i, idle = 1;
432         const struct cpumask *span;
433
434         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
435                 return 1;
436
437         span = sched_rt_period_mask();
438         for_each_cpu(i, span) {
439                 int enqueue = 0;
440                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
441                 struct rq *rq = rq_of_rt_rq(rt_rq);
442
443                 spin_lock(&rq->lock);
444                 if (rt_rq->rt_time) {
445                         u64 runtime;
446
447                         spin_lock(&rt_rq->rt_runtime_lock);
448                         if (rt_rq->rt_throttled)
449                                 balance_runtime(rt_rq);
450                         runtime = rt_rq->rt_runtime;
451                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
452                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
453                                 rt_rq->rt_throttled = 0;
454                                 enqueue = 1;
455                         }
456                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
457                                 idle = 0;
458                         spin_unlock(&rt_rq->rt_runtime_lock);
459                 } else if (rt_rq->rt_nr_running)
460                         idle = 0;
461
462                 if (enqueue)
463                         sched_rt_rq_enqueue(rt_rq);
464                 spin_unlock(&rq->lock);
465         }
466
467         return idle;
468 }
469
470 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
471 {
472 #ifdef CONFIG_RT_GROUP_SCHED
473         struct rt_rq *rt_rq = group_rt_rq(rt_se);
474
475         if (rt_rq)
476                 return rt_rq->highest_prio;
477 #endif
478
479         return rt_task_of(rt_se)->prio;
480 }
481
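/*
 * Check whether this rt_rq has used up its runtime for the current
 * period.  Try to borrow from other CPUs first; if we are still over the
 * limit, throttle and dequeue the queue and return 1 so the caller
 * reschedules.
 */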
482 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
483 {
484         u64 runtime = sched_rt_runtime(rt_rq);
485
486         if (rt_rq->rt_throttled)
487                 return rt_rq_throttled(rt_rq);
488
489         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
490                 return 0;
491
492         balance_runtime(rt_rq);
493         runtime = sched_rt_runtime(rt_rq);
494         if (runtime == RUNTIME_INF)
495                 return 0;
496
497         if (rt_rq->rt_time > runtime) {
498                 rt_rq->rt_throttled = 1;
499                 if (rt_rq_throttled(rt_rq)) {
500                         sched_rt_rq_dequeue(rt_rq);
501                         return 1;
502                 }
503         }
504
505         return 0;
506 }
507
508 /*
509  * Update the current task's runtime statistics. Skip current tasks that
510  * are not in our scheduling class.
511  */
512 static void update_curr_rt(struct rq *rq)
513 {
514         struct task_struct *curr = rq->curr;
515         struct sched_rt_entity *rt_se = &curr->rt;
516         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
517         u64 delta_exec;
518
519         if (!task_has_rt_policy(curr))
520                 return;
521
522         delta_exec = rq->clock - curr->se.exec_start;
523         if (unlikely((s64)delta_exec < 0))
524                 delta_exec = 0;
525
526         schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
527
528         curr->se.sum_exec_runtime += delta_exec;
529         account_group_exec_runtime(curr, delta_exec);
530
531         curr->se.exec_start = rq->clock;
532         cpuacct_charge(curr, delta_exec);
533
534         if (!rt_bandwidth_enabled())
535                 return;
536
537         for_each_sched_rt_entity(rt_se) {
538                 rt_rq = rt_rq_of_se(rt_se);
539
540                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
541                         spin_lock(&rt_rq->rt_runtime_lock);
542                         rt_rq->rt_time += delta_exec;
543                         if (sched_rt_runtime_exceeded(rt_rq))
544                                 resched_task(curr);
545                         spin_unlock(&rt_rq->rt_runtime_lock);
546                 }
547         }
548 }
549
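/*
 * Accounting done when an RT entity is enqueued on an rt_rq: bump the
 * running count, track the highest priority (and publish it via cpupri
 * on SMP), update the migratable-task count and overload state, keep the
 * boosted count for group scheduling, and make sure the bandwidth
 * replenishment timer is running.
 */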
550 static inline
551 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
552 {
553         int prio = rt_se_prio(rt_se);
554 #ifdef CONFIG_SMP
555         struct rq *rq = rq_of_rt_rq(rt_rq);
556 #endif
557
558         WARN_ON(!rt_prio(prio));
559         rt_rq->rt_nr_running++;
560 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
561         if (prio < rt_rq->highest_prio) {
562
563                 rt_rq->highest_prio = prio;
564 #ifdef CONFIG_SMP
565                 if (rq->online)
566                         cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
567 #endif
568         }
569 #endif
570 #ifdef CONFIG_SMP
571         if (rt_se->nr_cpus_allowed > 1)
572                 rq->rt.rt_nr_migratory++;
573
574         update_rt_migration(rq);
575 #endif
576 #ifdef CONFIG_RT_GROUP_SCHED
577         if (rt_se_boosted(rt_se))
578                 rt_rq->rt_nr_boosted++;
579
580         if (rt_rq->tg)
581                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
582 #else
583         start_rt_bandwidth(&def_rt_bandwidth);
584 #endif
585 }
586
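/*
 * The inverse of inc_rt_tasks(): drop the running count, recompute the
 * highest priority from the bitmap when the top-priority entity leaves
 * (updating cpupri if it changed), and undo the migratory and boosted
 * accounting.
 */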
587 static inline
588 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
589 {
590 #ifdef CONFIG_SMP
591         struct rq *rq = rq_of_rt_rq(rt_rq);
592         int highest_prio = rt_rq->highest_prio;
593 #endif
594
595         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
596         WARN_ON(!rt_rq->rt_nr_running);
597         rt_rq->rt_nr_running--;
598 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
599         if (rt_rq->rt_nr_running) {
600                 struct rt_prio_array *array;
601
602                 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
603                 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
604                         /* recalculate */
605                         array = &rt_rq->active;
606                         rt_rq->highest_prio =
607                                 sched_find_first_bit(array->bitmap);
608                 } /* otherwise leave rt_rq->highest_prio alone */
609         } else
610                 rt_rq->highest_prio = MAX_RT_PRIO;
611 #endif
612 #ifdef CONFIG_SMP
613         if (rt_se->nr_cpus_allowed > 1)
614                 rq->rt.rt_nr_migratory--;
615
616         if (rq->online && rt_rq->highest_prio != highest_prio)
617                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio);
618
619         update_rt_migration(rq);
620 #endif /* CONFIG_SMP */
621 #ifdef CONFIG_RT_GROUP_SCHED
622         if (rt_se_boosted(rt_se))
623                 rt_rq->rt_nr_boosted--;
624
625         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
626 #endif
627 }
628
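/*
 * Put the entity on the tail of its priority list, set the priority bit
 * in the bitmap and update the rt_rq accounting.  Throttled or empty
 * group queues are not enqueued (see below).
 */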
629 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
630 {
631         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
632         struct rt_prio_array *array = &rt_rq->active;
633         struct rt_rq *group_rq = group_rt_rq(rt_se);
634         struct list_head *queue = array->queue + rt_se_prio(rt_se);
635
636         /*
637          * Don't enqueue the group if it's throttled, or when empty.
638          * The latter is a consequence of the former when a child group
639          * gets throttled and the current group doesn't have any other
640          * active members.
641          */
642         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
643                 return;
644
645         list_add_tail(&rt_se->run_list, queue);
646         __set_bit(rt_se_prio(rt_se), array->bitmap);
647
648         inc_rt_tasks(rt_se, rt_rq);
649 }
650
651 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
652 {
653         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
654         struct rt_prio_array *array = &rt_rq->active;
655
656         list_del_init(&rt_se->run_list);
657         if (list_empty(array->queue + rt_se_prio(rt_se)))
658                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
659
660         dec_rt_tasks(rt_se, rt_rq);
661 }
662
663 /*
664  * Because the prio of an upper entry depends on the lower
665  * entries, we must remove entries top-down.
666  */
667 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
668 {
669         struct sched_rt_entity *back = NULL;
670
671         for_each_sched_rt_entity(rt_se) {
672                 rt_se->back = back;
673                 back = rt_se;
674         }
675
676         for (rt_se = back; rt_se; rt_se = rt_se->back) {
677                 if (on_rt_rq(rt_se))
678                         __dequeue_rt_entity(rt_se);
679         }
680 }
681
682 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
683 {
684         dequeue_rt_stack(rt_se);
685         for_each_sched_rt_entity(rt_se)
686                 __enqueue_rt_entity(rt_se);
687 }
688
689 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
690 {
691         dequeue_rt_stack(rt_se);
692
693         for_each_sched_rt_entity(rt_se) {
694                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
695
696                 if (rt_rq && rt_rq->rt_nr_running)
697                         __enqueue_rt_entity(rt_se);
698         }
699 }
700
701 /*
702  * Adding/removing a task to/from a priority array:
703  */
704 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
705 {
706         struct sched_rt_entity *rt_se = &p->rt;
707
708         if (wakeup)
709                 rt_se->timeout = 0;
710
711         enqueue_rt_entity(rt_se);
712
713         inc_cpu_load(rq, p->se.load.weight);
714 }
715
716 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
717 {
718         struct sched_rt_entity *rt_se = &p->rt;
719
720         update_curr_rt(rq);
721         dequeue_rt_entity(rt_se);
722
723         dec_cpu_load(rq, p->se.load.weight);
724 }
725
726 /*
727  * Put task to the end of the run list without the overhead of dequeue
728  * followed by enqueue.
729  */
730 static void
731 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
732 {
733         if (on_rt_rq(rt_se)) {
734                 struct rt_prio_array *array = &rt_rq->active;
735                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
736
737                 if (head)
738                         list_move(&rt_se->run_list, queue);
739                 else
740                         list_move_tail(&rt_se->run_list, queue);
741         }
742 }
743
744 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
745 {
746         struct sched_rt_entity *rt_se = &p->rt;
747         struct rt_rq *rt_rq;
748
749         for_each_sched_rt_entity(rt_se) {
750                 rt_rq = rt_rq_of_se(rt_se);
751                 requeue_rt_entity(rt_rq, rt_se, head);
752         }
753 }
754
755 static void yield_task_rt(struct rq *rq)
756 {
757         requeue_task_rt(rq, rq->curr, 0);
758 }
759
760 #ifdef CONFIG_SMP
761 static int find_lowest_rq(struct task_struct *task);
762
763 static int select_task_rq_rt(struct task_struct *p, int sync)
764 {
765         struct rq *rq = task_rq(p);
766
767         /*
768          * If the current task is an RT task, then
769          * try to see if we can wake this RT task up on another
770          * runqueue. Otherwise simply start this RT task
771          * on its current runqueue.
772          *
773          * We want to avoid overloading runqueues, even if
774          * the RT task is of higher priority than the current RT task.
775          * RT tasks behave differently from other tasks: if
776          * one gets preempted, we try to push it off to another queue.
777          * So trying to keep a preempting RT task on the same
778          * cache-hot CPU would force the running RT task onto
779          * a cold CPU, wasting all the cache of the lower-priority
780          * RT task in hopes of saving some for an RT task
781          * that is just being woken and probably has a
782          * cold cache anyway.
783          */
784         if (unlikely(rt_task(rq->curr)) &&
785             (p->rt.nr_cpus_allowed > 1)) {
786                 int cpu = find_lowest_rq(p);
787
788                 return (cpu == -1) ? task_cpu(p) : cpu;
789         }
790
791         /*
792          * Otherwise, just let it ride on the affined RQ and the
793          * post-schedule router will push the preempted task away
794          */
795         return task_cpu(p);
796 }
797
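/*
 * A task of equal priority woke up here.  If 'p' cannot run anywhere
 * else but current can, requeue 'p' at the head and reschedule so the
 * push logic gets a chance to move current to another CPU.
 */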
798 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
799 {
800         cpumask_var_t mask;
801
802         if (rq->curr->rt.nr_cpus_allowed == 1)
803                 return;
804
805         if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
806                 return;
807
808         if (p->rt.nr_cpus_allowed != 1
809             && cpupri_find(&rq->rd->cpupri, p, mask))
810                 goto free;
811
812         if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
813                 goto free;
814
815         /*
816          * There appear to be other cpus that can accept
817          * current and none to run 'p', so let's reschedule
818          * to try and push current away:
819          */
820         requeue_task_rt(rq, p, 1);
821         resched_task(rq->curr);
822 free:
823         free_cpumask_var(mask);
824 }
825
826 #endif /* CONFIG_SMP */
827
828 /*
829  * Preempt the current task with a newly woken task if needed:
830  */
831 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
832 {
833         if (p->prio < rq->curr->prio) {
834                 resched_task(rq->curr);
835                 return;
836         }
837
838 #ifdef CONFIG_SMP
839         /*
840          * If:
841          *
842          * - the newly woken task is of equal priority to the current task
843          * - the newly woken task is non-migratable while current is migratable
844          * - current will be preempted on the next reschedule
845          *
846          * we should check to see if current can readily move to a different
847          * cpu.  If so, we will reschedule to allow the push logic to try
848          * to move current somewhere else, making room for our non-migratable
849          * task.
850          */
851         if (p->prio == rq->curr->prio && !need_resched())
852                 check_preempt_equal_prio(rq, p);
853 #endif
854 }
855
856 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
857                                                    struct rt_rq *rt_rq)
858 {
859         struct rt_prio_array *array = &rt_rq->active;
860         struct sched_rt_entity *next = NULL;
861         struct list_head *queue;
862         int idx;
863
864         idx = sched_find_first_bit(array->bitmap);
865         BUG_ON(idx >= MAX_RT_PRIO);
866
867         queue = array->queue + idx;
868         next = list_entry(queue->next, struct sched_rt_entity, run_list);
869
870         return next;
871 }
872
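/*
 * Pick the next task: walk down the group hierarchy, taking the
 * highest-priority entity at each level until we reach a task.
 */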
873 static struct task_struct *pick_next_task_rt(struct rq *rq)
874 {
875         struct sched_rt_entity *rt_se;
876         struct task_struct *p;
877         struct rt_rq *rt_rq;
878
879         rt_rq = &rq->rt;
880
881         if (unlikely(!rt_rq->rt_nr_running))
882                 return NULL;
883
884         if (rt_rq_throttled(rt_rq))
885                 return NULL;
886
887         do {
888                 rt_se = pick_next_rt_entity(rq, rt_rq);
889                 BUG_ON(!rt_se);
890                 rt_rq = group_rt_rq(rt_se);
891         } while (rt_rq);
892
893         p = rt_task_of(rt_se);
894         p->se.exec_start = rq->clock;
895         return p;
896 }
897
898 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
899 {
900         update_curr_rt(rq);
901         p->se.exec_start = 0;
902 }
903
904 #ifdef CONFIG_SMP
905
906 /* Only try algorithms three times */
907 #define RT_MAX_TRIES 3
908
909 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
910
911 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
912 {
913         if (!task_running(rq, p) &&
914             (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
915             (p->rt.nr_cpus_allowed > 1))
916                 return 1;
917         return 0;
918 }
919
920 /* Return the second highest RT task, NULL otherwise */
921 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
922 {
923         struct task_struct *next = NULL;
924         struct sched_rt_entity *rt_se;
925         struct rt_prio_array *array;
926         struct rt_rq *rt_rq;
927         int idx;
928
929         for_each_leaf_rt_rq(rt_rq, rq) {
930                 array = &rt_rq->active;
931                 idx = sched_find_first_bit(array->bitmap);
932  next_idx:
933                 if (idx >= MAX_RT_PRIO)
934                         continue;
935                 if (next && next->prio < idx)
936                         continue;
937                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
938                         struct task_struct *p = rt_task_of(rt_se);
939                         if (pick_rt_task(rq, p, cpu)) {
940                                 next = p;
941                                 break;
942                         }
943                 }
944                 if (!next) {
945                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
946                         goto next_idx;
947                 }
948         }
949
950         return next;
951 }
952
953 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
954
955 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
956 {
957         int first;
958
959         /* "this_cpu" is cheaper to preempt than a remote processor */
960         if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
961                 return this_cpu;
962
963         first = first_cpu(*mask);
964         if (first != NR_CPUS)
965                 return first;
966
967         return -1;
968 }
969
970 static int find_lowest_rq(struct task_struct *task)
971 {
972         struct sched_domain *sd;
973         struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
974         int this_cpu = smp_processor_id();
975         int cpu      = task_cpu(task);
976
977         if (task->rt.nr_cpus_allowed == 1)
978                 return -1; /* No other targets possible */
979
980         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
981                 return -1; /* No targets found */
982
983         /*
984          * Only consider CPUs that are usable for migration.
985          * I guess we might want to change cpupri_find() to ignore those
986          * in the first place.
987          */
988         cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
989
990         /*
991          * At this point we have built a mask of cpus representing the
992          * lowest priority tasks in the system.  Now we want to elect
993          * the best one based on our affinity and topology.
994          *
995          * We prioritize the last cpu that the task executed on since
996          * it is most likely cache-hot in that location.
997          */
998         if (cpumask_test_cpu(cpu, lowest_mask))
999                 return cpu;
1000
1001         /*
1002          * Otherwise, we consult the sched_domains span maps to figure
1003          * out which cpu is logically closest to our hot cache data.
1004          */
1005         if (this_cpu == cpu)
1006                 this_cpu = -1; /* Skip this_cpu opt if the same */
1007
1008         for_each_domain(cpu, sd) {
1009                 if (sd->flags & SD_WAKE_AFFINE) {
1010                         cpumask_t domain_mask;
1011                         int       best_cpu;
1012
1013                         cpumask_and(&domain_mask, sched_domain_span(sd),
1014                                     lowest_mask);
1015
1016                         best_cpu = pick_optimal_cpu(this_cpu,
1017                                                     &domain_mask);
1018                         if (best_cpu != -1)
1019                                 return best_cpu;
1020                 }
1021         }
1022
1023         /*
1024          * And finally, if there were no matches within the domains
1025          * just give the caller *something* to work with from the compatible
1026          * locations.
1027          */
1028         return pick_optimal_cpu(this_cpu, lowest_mask);
1029 }
1030
1031 /* Will lock the rq it finds */
1032 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1033 {
1034         struct rq *lowest_rq = NULL;
1035         int tries;
1036         int cpu;
1037
1038         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1039                 cpu = find_lowest_rq(task);
1040
1041                 if ((cpu == -1) || (cpu == rq->cpu))
1042                         break;
1043
1044                 lowest_rq = cpu_rq(cpu);
1045
1046                 /* if the prio of this runqueue changed, try again */
1047                 if (double_lock_balance(rq, lowest_rq)) {
1048                         /*
1049                          * We had to unlock the run queue. In
1050                          * the meantime, the task could have
1051                          * migrated already or had its affinity changed.
1052                          * Also make sure that it wasn't scheduled on its rq.
1053                          */
1054                         if (unlikely(task_rq(task) != rq ||
1055                                      !cpumask_test_cpu(lowest_rq->cpu,
1056                                                        &task->cpus_allowed) ||
1057                                      task_running(rq, task) ||
1058                                      !task->se.on_rq)) {
1059
1060                                 spin_unlock(&lowest_rq->lock);
1061                                 lowest_rq = NULL;
1062                                 break;
1063                         }
1064                 }
1065
1066                 /* If this rq is still suitable use it. */
1067                 if (lowest_rq->rt.highest_prio > task->prio)
1068                         break;
1069
1070                 /* try again */
1071                 double_unlock_balance(rq, lowest_rq);
1072                 lowest_rq = NULL;
1073         }
1074
1075         return lowest_rq;
1076 }
1077
1078 /*
1079  * If the current CPU has more than one RT task, see if the non-
1080  * running task can migrate over to a CPU that is running a task
1081  * of lesser priority.
1082  */
1083 static int push_rt_task(struct rq *rq)
1084 {
1085         struct task_struct *next_task;
1086         struct rq *lowest_rq;
1087         int ret = 0;
1088         int paranoid = RT_MAX_TRIES;
1089
1090         if (!rq->rt.overloaded)
1091                 return 0;
1092
1093         next_task = pick_next_highest_task_rt(rq, -1);
1094         if (!next_task)
1095                 return 0;
1096
1097  retry:
1098         if (unlikely(next_task == rq->curr)) {
1099                 WARN_ON(1);
1100                 return 0;
1101         }
1102
1103         /*
1104          * It's possible that the next_task slipped in with a
1105          * higher priority than current. If that's the case
1106          * just reschedule current.
1107          */
1108         if (unlikely(next_task->prio < rq->curr->prio)) {
1109                 resched_task(rq->curr);
1110                 return 0;
1111         }
1112
1113         /* We might release rq lock */
1114         get_task_struct(next_task);
1115
1116         /* find_lock_lowest_rq locks the rq if found */
1117         lowest_rq = find_lock_lowest_rq(next_task, rq);
1118         if (!lowest_rq) {
1119                 struct task_struct *task;
1120                 /*
1121                  * find_lock_lowest_rq releases rq->lock
1122                  * so it is possible that next_task has changed.
1123                  * If it has, then try again.
1124                  */
1125                 task = pick_next_highest_task_rt(rq, -1);
1126                 if (unlikely(task != next_task) && task && paranoid--) {
1127                         put_task_struct(next_task);
1128                         next_task = task;
1129                         goto retry;
1130                 }
1131                 goto out;
1132         }
1133
1134         deactivate_task(rq, next_task, 0);
1135         set_task_cpu(next_task, lowest_rq->cpu);
1136         activate_task(lowest_rq, next_task, 0);
1137
1138         resched_task(lowest_rq->curr);
1139
1140         double_unlock_balance(rq, lowest_rq);
1141
1142         ret = 1;
1143 out:
1144         put_task_struct(next_task);
1145
1146         return ret;
1147 }
1148
1149 /*
1150  * TODO: Currently we just use the second highest prio task on
1151  *       the queue, and stop when it can't migrate (or there's
1152  *       no more RT tasks).  There may be a case where a lower
1153  *       priority RT task has a different affinity than the
1154  *       higher RT task. In this case the lower RT task could
1155  *       possibly be able to migrate whereas the higher priority
1156  *       RT task could not.  We currently ignore this issue.
1157  *       Enhancements are welcome!
1158  */
1159 static void push_rt_tasks(struct rq *rq)
1160 {
1161         /* push_rt_task will return true if it moved an RT */
1162         while (push_rt_task(rq))
1163                 ;
1164 }
1165
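/*
 * Pull RT tasks towards this runqueue: scan the CPUs marked in the root
 * domain's overload mask and steal any queued RT task that has a higher
 * priority than what we are about to run.  Returns nonzero when a task
 * was pulled or this rq's next task may have changed.
 */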
1166 static int pull_rt_task(struct rq *this_rq)
1167 {
1168         int this_cpu = this_rq->cpu, ret = 0, cpu;
1169         struct task_struct *p, *next;
1170         struct rq *src_rq;
1171
1172         if (likely(!rt_overloaded(this_rq)))
1173                 return 0;
1174
1175         next = pick_next_task_rt(this_rq);
1176
1177         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1178                 if (this_cpu == cpu)
1179                         continue;
1180
1181                 src_rq = cpu_rq(cpu);
1182                 /*
1183                  * We can potentially drop this_rq's lock in
1184                  * double_lock_balance, and another CPU could
1185                  * steal our next task - hence we must cause
1186                  * the caller to recalculate the next task
1187                  * in that case:
1188                  */
1189                 if (double_lock_balance(this_rq, src_rq)) {
1190                         struct task_struct *old_next = next;
1191
1192                         next = pick_next_task_rt(this_rq);
1193                         if (next != old_next)
1194                                 ret = 1;
1195                 }
1196
1197                 /*
1198                  * Are there still pullable RT tasks?
1199                  */
1200                 if (src_rq->rt.rt_nr_running <= 1)
1201                         goto skip;
1202
1203                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1204
1205                 /*
1206                  * Do we have an RT task that preempts
1207                  * the to-be-scheduled task?
1208                  */
1209                 if (p && (!next || (p->prio < next->prio))) {
1210                         WARN_ON(p == src_rq->curr);
1211                         WARN_ON(!p->se.on_rq);
1212
1213                         /*
1214                          * There's a chance that p is higher in priority
1215                          * than what's currently running on its cpu.
1216                          * This is just because p is waking up and hasn't
1217                          * had a chance to schedule. We only pull
1218                          * p if it is lower in priority than the
1219                          * current task on the run queue or
1220                          * this_rq's next task is lower in prio than
1221                          * the current task on that rq.
1222                          */
1223                         if (p->prio < src_rq->curr->prio ||
1224                             (next && next->prio < src_rq->curr->prio))
1225                                 goto skip;
1226
1227                         ret = 1;
1228
1229                         deactivate_task(src_rq, p, 0);
1230                         set_task_cpu(p, this_cpu);
1231                         activate_task(this_rq, p, 0);
1232                         /*
1233                          * We continue with the search, just in
1234                          * case there's an even higher prio task
1235                          * in another runqueue. (low likelihood
1236                          * but possible)
1237                          *
1238                          * Update next so that we won't pick a task
1239                          * on another cpu with a priority lower (or equal)
1240                          * than the one we just picked.
1241                          */
1242                         next = p;
1243
1244                 }
1245  skip:
1246                 double_unlock_balance(this_rq, src_rq);
1247         }
1248
1249         return ret;
1250 }
1251
1252 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1253 {
1254         /* Try to pull RT tasks here if we lower this rq's prio */
1255         if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
1256                 pull_rt_task(rq);
1257 }
1258
1259 static void post_schedule_rt(struct rq *rq)
1260 {
1261         /*
1262          * If we have more than one rt_task queued, then
1263          * see if we can push the other rt_tasks off to other CPUs.
1264          * Note we may release the rq lock, and since
1265          * the lock was owned by prev, we need to release it
1266          * first via finish_lock_switch and then reacquire it here.
1267          */
1268         if (unlikely(rq->rt.overloaded)) {
1269                 spin_lock_irq(&rq->lock);
1270                 push_rt_tasks(rq);
1271                 spin_unlock_irq(&rq->lock);
1272         }
1273 }
1274
1275 /*
1276  * If we are not running and we are not going to reschedule soon, we should
1277  * try to push tasks away now
1278  */
1279 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1280 {
1281         if (!task_running(rq, p) &&
1282             !test_tsk_need_resched(rq->curr) &&
1283             rq->rt.overloaded)
1284                 push_rt_tasks(rq);
1285 }
1286
1287 static unsigned long
1288 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1289                 unsigned long max_load_move,
1290                 struct sched_domain *sd, enum cpu_idle_type idle,
1291                 int *all_pinned, int *this_best_prio)
1292 {
1293         /* don't touch RT tasks */
1294         return 0;
1295 }
1296
1297 static int
1298 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1299                  struct sched_domain *sd, enum cpu_idle_type idle)
1300 {
1301         /* don't touch RT tasks */
1302         return 0;
1303 }
1304
1305 static void set_cpus_allowed_rt(struct task_struct *p,
1306                                 const struct cpumask *new_mask)
1307 {
1308         int weight = cpumask_weight(new_mask);
1309
1310         BUG_ON(!rt_task(p));
1311
1312         /*
1313          * Update the migration status of the RQ if we have an RT task
1314          * which is running AND changing its weight value.
1315          */
1316         if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1317                 struct rq *rq = task_rq(p);
1318
1319                 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1320                         rq->rt.rt_nr_migratory++;
1321                 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1322                         BUG_ON(!rq->rt.rt_nr_migratory);
1323                         rq->rt.rt_nr_migratory--;
1324                 }
1325
1326                 update_rt_migration(rq);
1327         }
1328
1329         cpumask_copy(&p->cpus_allowed, new_mask);
1330         p->rt.nr_cpus_allowed = weight;
1331 }
1332
1333 /* Assumes rq->lock is held */
1334 static void rq_online_rt(struct rq *rq)
1335 {
1336         if (rq->rt.overloaded)
1337                 rt_set_overload(rq);
1338
1339         __enable_runtime(rq);
1340
1341         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
1342 }
1343
1344 /* Assumes rq->lock is held */
1345 static void rq_offline_rt(struct rq *rq)
1346 {
1347         if (rq->rt.overloaded)
1348                 rt_clear_overload(rq);
1349
1350         __disable_runtime(rq);
1351
1352         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1353 }
1354
1355 /*
1356  * When switching from the rt queue, we bring ourselves to a position
1357  * where we might want to pull RT tasks from other runqueues.
1358  */
1359 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1360                            int running)
1361 {
1362         /*
1363          * If there are other RT tasks then we will reschedule
1364          * and the scheduling of the other RT tasks will handle
1365          * the balancing. But if we are the last RT task
1366          * we may need to handle the pulling of RT tasks
1367          * now.
1368          */
1369         if (!rq->rt.rt_nr_running)
1370                 pull_rt_task(rq);
1371 }
1372
1373 static inline void init_sched_rt_class(void)
1374 {
1375         unsigned int i;
1376
1377         for_each_possible_cpu(i)
1378                 alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL);
1379 }
1380 #endif /* CONFIG_SMP */
1381
1382 /*
1383  * When switching a task to RT, we may overload the runqueue
1384  * with RT tasks. In this case we try to push them off to
1385  * other runqueues.
1386  */
1387 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1388                            int running)
1389 {
1390         int check_resched = 1;
1391
1392         /*
1393          * If we are already running, then there's nothing
1394          * we may need to preempt the currently running task.
1395          * If that currently running task is also an RT task
1396          * If that current running task is also an RT task
1397          * then see if we can move to another run queue.
1398          */
1399         if (!running) {
1400 #ifdef CONFIG_SMP
1401                 if (rq->rt.overloaded && push_rt_task(rq) &&
1402                     /* Don't resched if we changed runqueues */
1403                     rq != task_rq(p))
1404                         check_resched = 0;
1405 #endif /* CONFIG_SMP */
1406                 if (check_resched && p->prio < rq->curr->prio)
1407                         resched_task(rq->curr);
1408         }
1409 }
1410
1411 /*
1412  * Priority of the task has changed. This may cause
1413  * us to initiate a push or pull.
1414  */
1415 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1416                             int oldprio, int running)
1417 {
1418         if (running) {
1419 #ifdef CONFIG_SMP
1420                 /*
1421                  * If our priority decreases while running, we
1422                  * may need to pull tasks to this runqueue.
1423                  */
1424                 if (oldprio < p->prio)
1425                         pull_rt_task(rq);
1426                 /*
1427                  * If there's a higher priority task waiting to run
1428                  * then reschedule. Note, the above pull_rt_task
1429                  * can release the rq lock and p could migrate.
1430                  * Only reschedule if p is still on the same runqueue.
1431                  */
1432                 if (p->prio > rq->rt.highest_prio && rq->curr == p)
1433                         resched_task(p);
1434 #else
1435                 /* For UP simply resched on drop of prio */
1436                 if (oldprio < p->prio)
1437                         resched_task(p);
1438 #endif /* CONFIG_SMP */
1439         } else {
1440                 /*
1441                  * This task is not running, but if it is
1442                  * greater than the current running task
1443                  * then reschedule.
1444                  */
1445                 if (p->prio < rq->curr->prio)
1446                         resched_task(rq->curr);
1447         }
1448 }
1449
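/*
 * Enforce RLIMIT_RTTIME: count the ticks this task has spent running
 * without sleeping and, once the soft limit is exceeded, set
 * cputime_expires.sched_exp so the posix CPU timer code enforces the
 * limit.
 */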
1450 static void watchdog(struct rq *rq, struct task_struct *p)
1451 {
1452         unsigned long soft, hard;
1453
1454         if (!p->signal)
1455                 return;
1456
1457         soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1458         hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1459
1460         if (soft != RLIM_INFINITY) {
1461                 unsigned long next;
1462
1463                 p->rt.timeout++;
1464                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1465                 if (p->rt.timeout > next)
1466                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1467         }
1468 }
1469
1470 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1471 {
1472         update_curr_rt(rq);
1473
1474         watchdog(rq, p);
1475
1476         /*
1477          * RR tasks need a special form of timeslice management.
1478          * FIFO tasks have no timeslices.
1479          */
1480         if (p->policy != SCHED_RR)
1481                 return;
1482
1483         if (--p->rt.time_slice)
1484                 return;
1485
1486         p->rt.time_slice = DEF_TIMESLICE;
1487
1488         /*
1489          * Requeue to the end of queue if we are not the only element
1490          * on the queue:
1491          */
1492         if (p->rt.run_list.prev != p->rt.run_list.next) {
1493                 requeue_task_rt(rq, p, 0);
1494                 set_tsk_need_resched(p);
1495         }
1496 }
1497
1498 static void set_curr_task_rt(struct rq *rq)
1499 {
1500         struct task_struct *p = rq->curr;
1501
1502         p->se.exec_start = rq->clock;
1503 }
1504
1505 static const struct sched_class rt_sched_class = {
1506         .next                   = &fair_sched_class,
1507         .enqueue_task           = enqueue_task_rt,
1508         .dequeue_task           = dequeue_task_rt,
1509         .yield_task             = yield_task_rt,
1510
1511         .check_preempt_curr     = check_preempt_curr_rt,
1512
1513         .pick_next_task         = pick_next_task_rt,
1514         .put_prev_task          = put_prev_task_rt,
1515
1516 #ifdef CONFIG_SMP
1517         .select_task_rq         = select_task_rq_rt,
1518
1519         .load_balance           = load_balance_rt,
1520         .move_one_task          = move_one_task_rt,
1521         .set_cpus_allowed       = set_cpus_allowed_rt,
1522         .rq_online              = rq_online_rt,
1523         .rq_offline             = rq_offline_rt,
1524         .pre_schedule           = pre_schedule_rt,
1525         .post_schedule          = post_schedule_rt,
1526         .task_wake_up           = task_wake_up_rt,
1527         .switched_from          = switched_from_rt,
1528 #endif
1529
1530         .set_curr_task          = set_curr_task_rt,
1531         .task_tick              = task_tick_rt,
1532
1533         .prio_changed           = prio_changed_rt,
1534         .switched_to            = switched_to_rt,
1535 };
1536
1537 #ifdef CONFIG_SCHED_DEBUG
1538 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1539
1540 static void print_rt_stats(struct seq_file *m, int cpu)
1541 {
1542         struct rt_rq *rt_rq;
1543
1544         rcu_read_lock();
1545         for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1546                 print_rt_rq(m, cpu, rt_rq);
1547         rcu_read_unlock();
1548 }
1549 #endif /* CONFIG_SCHED_DEBUG */
1550