kernel/sched_rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #ifdef CONFIG_SMP
7
8 static inline int rt_overloaded(struct rq *rq)
9 {
10         return atomic_read(&rq->rd->rto_count);
11 }
12
13 static inline void rt_set_overload(struct rq *rq)
14 {
15         if (!rq->online)
16                 return;
17
18         cpu_set(rq->cpu, rq->rd->rto_mask);
19         /*
20          * Make sure the mask is visible before we set
21          * the overload count. That is checked to determine
22          * if we should look at the mask. It would be a shame
23          * if we looked at the mask, but the mask was not
24          * updated yet.
25          */
26         wmb();
27         atomic_inc(&rq->rd->rto_count);
28 }
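
/*
 * For reference, the read side this barrier pairs with is in
 * pull_rt_task() below, which does roughly:
 *
 *	if (!rt_overloaded(this_rq))		(reads rd->rto_count)
 *		return 0;
 *	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask)
 *		...
 *
 * The count is checked before the mask is walked, hence the mask update
 * must become visible before the count is raised.
 */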
29
30 static inline void rt_clear_overload(struct rq *rq)
31 {
32         if (!rq->online)
33                 return;
34
35         /* the order here really doesn't matter */
36         atomic_dec(&rq->rd->rto_count);
37         cpu_clear(rq->cpu, rq->rd->rto_mask);
38 }
39
40 static void update_rt_migration(struct rq *rq)
41 {
42         if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
43                 if (!rq->rt.overloaded) {
44                         rt_set_overload(rq);
45                         rq->rt.overloaded = 1;
46                 }
47         } else if (rq->rt.overloaded) {
48                 rt_clear_overload(rq);
49                 rq->rt.overloaded = 0;
50         }
51 }
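
/*
 * Example: two queued SCHED_FIFO tasks of which at least one may run on
 * other CPUs marks the runqueue overloaded; a lone RT task, or several
 * tasks all pinned to this CPU, clears the overload state.
 */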
52 #endif /* CONFIG_SMP */
53
54 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
55 {
56         return container_of(rt_se, struct task_struct, rt);
57 }
58
59 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60 {
61         return !list_empty(&rt_se->run_list);
62 }
63
64 #ifdef CONFIG_RT_GROUP_SCHED
65
66 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
67 {
68         if (!rt_rq->tg)
69                 return RUNTIME_INF;
70
71         return rt_rq->rt_runtime;
72 }
73
74 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
75 {
76         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
77 }
78
79 #define for_each_leaf_rt_rq(rt_rq, rq) \
80         list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81
82 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83 {
84         return rt_rq->rq;
85 }
86
87 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88 {
89         return rt_se->rt_rq;
90 }
91
92 #define for_each_sched_rt_entity(rt_se) \
93         for (; rt_se; rt_se = rt_se->parent)
94
95 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
96 {
97         return rt_se->my_q;
98 }
99
100 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
101 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
102
103 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
104 {
105         struct sched_rt_entity *rt_se = rt_rq->rt_se;
106
107         if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
108                 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
109
110                 enqueue_rt_entity(rt_se);
111                 if (rt_rq->highest_prio < curr->prio)
112                         resched_task(curr);
113         }
114 }
115
116 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
117 {
118         struct sched_rt_entity *rt_se = rt_rq->rt_se;
119
120         if (rt_se && on_rt_rq(rt_se))
121                 dequeue_rt_entity(rt_se);
122 }
123
124 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
125 {
126         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
127 }
128
129 static int rt_se_boosted(struct sched_rt_entity *rt_se)
130 {
131         struct rt_rq *rt_rq = group_rt_rq(rt_se);
132         struct task_struct *p;
133
134         if (rt_rq)
135                 return !!rt_rq->rt_nr_boosted;
136
137         p = rt_task_of(rt_se);
138         return p->prio != p->normal_prio;
139 }
140
141 #ifdef CONFIG_SMP
142 static inline cpumask_t sched_rt_period_mask(void)
143 {
144         return cpu_rq(smp_processor_id())->rd->span;
145 }
146 #else
147 static inline cpumask_t sched_rt_period_mask(void)
148 {
149         return cpu_online_map;
150 }
151 #endif
152
153 static inline
154 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
155 {
156         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
157 }
158
159 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
160 {
161         return &rt_rq->tg->rt_bandwidth;
162 }
163
164 #else /* !CONFIG_RT_GROUP_SCHED */
165
166 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
167 {
168         return rt_rq->rt_runtime;
169 }
170
171 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
172 {
173         return ktime_to_ns(def_rt_bandwidth.rt_period);
174 }
175
176 #define for_each_leaf_rt_rq(rt_rq, rq) \
177         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180 {
181         return container_of(rt_rq, struct rq, rt);
182 }
183
184 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185 {
186         struct task_struct *p = rt_task_of(rt_se);
187         struct rq *rq = task_rq(p);
188
189         return &rq->rt;
190 }
191
192 #define for_each_sched_rt_entity(rt_se) \
193         for (; rt_se; rt_se = NULL)
194
195 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
196 {
197         return NULL;
198 }
199
200 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
201 {
202         if (rt_rq->rt_nr_running)
203                 resched_task(rq_of_rt_rq(rt_rq)->curr);
204 }
205
206 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
207 {
208 }
209
210 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
211 {
212         return rt_rq->rt_throttled;
213 }
214
215 static inline cpumask_t sched_rt_period_mask(void)
216 {
217         return cpu_online_map;
218 }
219
220 static inline
221 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
222 {
223         return &cpu_rq(cpu)->rt;
224 }
225
226 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
227 {
228         return &def_rt_bandwidth;
229 }
230
231 #endif /* CONFIG_RT_GROUP_SCHED */
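
/*
 * For context: without group scheduling every rt_rq shares def_rt_bandwidth,
 * which is driven by the global knobs /proc/sys/kernel/sched_rt_period_us
 * and /proc/sys/kernel/sched_rt_runtime_us (1000000 and 950000 by default,
 * i.e. RT tasks may use at most 0.95s of every 1s). With
 * CONFIG_RT_GROUP_SCHED each group's budget instead comes from the cpu
 * cgroup files cpu.rt_period_us and cpu.rt_runtime_us via tg->rt_bandwidth.
 */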
232
233 #ifdef CONFIG_SMP
234 static int do_balance_runtime(struct rt_rq *rt_rq)
235 {
236         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
237         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
238         int i, weight, more = 0;
239         u64 rt_period;
240
241         weight = cpus_weight(rd->span);
242
243         spin_lock(&rt_b->rt_runtime_lock);
244         rt_period = ktime_to_ns(rt_b->rt_period);
245         for_each_cpu_mask_nr(i, rd->span) {
246                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
247                 s64 diff;
248
249                 if (iter == rt_rq)
250                         continue;
251
252                 spin_lock(&iter->rt_runtime_lock);
253                 if (iter->rt_runtime == RUNTIME_INF)
254                         goto next;
255
256                 diff = iter->rt_runtime - iter->rt_time;
257                 if (diff > 0) {
258                         diff = div_u64((u64)diff, weight);
259                         if (rt_rq->rt_runtime + diff > rt_period)
260                                 diff = rt_period - rt_rq->rt_runtime;
261                         iter->rt_runtime -= diff;
262                         rt_rq->rt_runtime += diff;
263                         more = 1;
264                         if (rt_rq->rt_runtime == rt_period) {
265                                 spin_unlock(&iter->rt_runtime_lock);
266                                 break;
267                         }
268                 }
269 next:
270                 spin_unlock(&iter->rt_runtime_lock);
271         }
272         spin_unlock(&rt_b->rt_runtime_lock);
273
274         return more;
275 }
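
/*
 * Borrowing example: on a root domain spanning 4 CPUs (weight == 4), a
 * neighbouring rt_rq with 500ms of rt_runtime of which 100ms is used
 * donates diff = (500ms - 100ms) / 4 = 100ms to the starved rt_rq,
 * capped so the receiver's runtime never exceeds the period.
 */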
276
277 static void __disable_runtime(struct rq *rq)
278 {
279         struct root_domain *rd = rq->rd;
280         struct rt_rq *rt_rq;
281
282         if (unlikely(!scheduler_running))
283                 return;
284
285         for_each_leaf_rt_rq(rt_rq, rq) {
286                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
287                 s64 want;
288                 int i;
289
290                 spin_lock(&rt_b->rt_runtime_lock);
291                 spin_lock(&rt_rq->rt_runtime_lock);
292                 if (rt_rq->rt_runtime == RUNTIME_INF ||
293                                 rt_rq->rt_runtime == rt_b->rt_runtime)
294                         goto balanced;
295                 spin_unlock(&rt_rq->rt_runtime_lock);
296
297                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
298
299                 for_each_cpu_mask(i, rd->span) {
300                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
301                         s64 diff;
302
303                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
304                                 continue;
305
306                         spin_lock(&iter->rt_runtime_lock);
307                         if (want > 0) {
308                                 diff = min_t(s64, iter->rt_runtime, want);
309                                 iter->rt_runtime -= diff;
310                                 want -= diff;
311                         } else {
312                                 iter->rt_runtime -= want;
313                                 want -= want;
314                         }
315                         spin_unlock(&iter->rt_runtime_lock);
316
317                         if (!want)
318                                 break;
319                 }
320
321                 spin_lock(&rt_rq->rt_runtime_lock);
322                 BUG_ON(want);
323 balanced:
324                 rt_rq->rt_runtime = RUNTIME_INF;
325                 spin_unlock(&rt_rq->rt_runtime_lock);
326                 spin_unlock(&rt_b->rt_runtime_lock);
327         }
328 }
329
330 static void disable_runtime(struct rq *rq)
331 {
332         unsigned long flags;
333
334         spin_lock_irqsave(&rq->lock, flags);
335         __disable_runtime(rq);
336         spin_unlock_irqrestore(&rq->lock, flags);
337 }
338
339 static void __enable_runtime(struct rq *rq)
340 {
341         struct rt_rq *rt_rq;
342
343         if (unlikely(!scheduler_running))
344                 return;
345
346         for_each_leaf_rt_rq(rt_rq, rq) {
347                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
348
349                 spin_lock(&rt_b->rt_runtime_lock);
350                 spin_lock(&rt_rq->rt_runtime_lock);
351                 rt_rq->rt_runtime = rt_b->rt_runtime;
352                 rt_rq->rt_time = 0;
353                 rt_rq->rt_throttled = 0;
354                 spin_unlock(&rt_rq->rt_runtime_lock);
355                 spin_unlock(&rt_b->rt_runtime_lock);
356         }
357 }
358
359 static void enable_runtime(struct rq *rq)
360 {
361         unsigned long flags;
362
363         spin_lock_irqsave(&rq->lock, flags);
364         __enable_runtime(rq);
365         spin_unlock_irqrestore(&rq->lock, flags);
366 }
367
368 static int balance_runtime(struct rt_rq *rt_rq)
369 {
370         int more = 0;
371
372         if (rt_rq->rt_time > rt_rq->rt_runtime) {
373                 spin_unlock(&rt_rq->rt_runtime_lock);
374                 more = do_balance_runtime(rt_rq);
375                 spin_lock(&rt_rq->rt_runtime_lock);
376         }
377
378         return more;
379 }
380 #else /* !CONFIG_SMP */
381 static inline int balance_runtime(struct rt_rq *rt_rq)
382 {
383         return 0;
384 }
385 #endif /* CONFIG_SMP */
386
387 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
388 {
389         int i, idle = 1;
390         cpumask_t span;
391
392         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
393                 return 1;
394
395         span = sched_rt_period_mask();
396         for_each_cpu_mask(i, span) {
397                 int enqueue = 0;
398                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
399                 struct rq *rq = rq_of_rt_rq(rt_rq);
400
401                 spin_lock(&rq->lock);
402                 if (rt_rq->rt_time) {
403                         u64 runtime;
404
405                         spin_lock(&rt_rq->rt_runtime_lock);
406                         if (rt_rq->rt_throttled)
407                                 balance_runtime(rt_rq);
408                         runtime = rt_rq->rt_runtime;
409                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
410                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
411                                 rt_rq->rt_throttled = 0;
412                                 enqueue = 1;
413                         }
414                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
415                                 idle = 0;
416                         spin_unlock(&rt_rq->rt_runtime_lock);
417                 } else if (rt_rq->rt_nr_running)
418                         idle = 0;
419
420                 if (enqueue)
421                         sched_rt_rq_enqueue(rt_rq);
422                 spin_unlock(&rq->lock);
423         }
424
425         return idle;
426 }
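
/*
 * Replenishment example: with a 1s period, 950ms runtime and an overrun
 * of one period, an rt_rq that accumulated 970ms of rt_time drops to
 * 20ms here; since that is below the runtime again, a throttled rt_rq
 * is unthrottled and re-enqueued.
 */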
427
428 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
429 {
430 #ifdef CONFIG_RT_GROUP_SCHED
431         struct rt_rq *rt_rq = group_rt_rq(rt_se);
432
433         if (rt_rq)
434                 return rt_rq->highest_prio;
435 #endif
436
437         return rt_task_of(rt_se)->prio;
438 }
439
440 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
441 {
442         u64 runtime = sched_rt_runtime(rt_rq);
443
444         if (rt_rq->rt_throttled)
445                 return rt_rq_throttled(rt_rq);
446
447         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
448                 return 0;
449
450         balance_runtime(rt_rq);
451         runtime = sched_rt_runtime(rt_rq);
452         if (runtime == RUNTIME_INF)
453                 return 0;
454
455         if (rt_rq->rt_time > runtime) {
456                 rt_rq->rt_throttled = 1;
457                 if (rt_rq_throttled(rt_rq)) {
458                         sched_rt_rq_dequeue(rt_rq);
459                         return 1;
460                 }
461         }
462
463         return 0;
464 }
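
/*
 * Throttling example: with the default 950ms runtime in a 1s period, an
 * rt_rq whose tasks have consumed more than 950ms of the current period
 * (and which could not borrow enough via balance_runtime() above) is
 * marked throttled and dequeued, leaving the remaining ~50ms to the
 * lower scheduling classes.
 */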
465
466 /*
467  * Update the current task's runtime statistics. Skip current tasks that
468  * are not in our scheduling class.
469  */
470 static void update_curr_rt(struct rq *rq)
471 {
472         struct task_struct *curr = rq->curr;
473         struct sched_rt_entity *rt_se = &curr->rt;
474         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
475         u64 delta_exec;
476
477         if (!task_has_rt_policy(curr))
478                 return;
479
480         delta_exec = rq->clock - curr->se.exec_start;
481         if (unlikely((s64)delta_exec < 0))
482                 delta_exec = 0;
483
484         schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
485
486         curr->se.sum_exec_runtime += delta_exec;
487         curr->se.exec_start = rq->clock;
488         cpuacct_charge(curr, delta_exec);
489
490         if (!rt_bandwidth_enabled())
491                 return;
492
493         for_each_sched_rt_entity(rt_se) {
494                 rt_rq = rt_rq_of_se(rt_se);
495
496                 spin_lock(&rt_rq->rt_runtime_lock);
497                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
498                         rt_rq->rt_time += delta_exec;
499                         if (sched_rt_runtime_exceeded(rt_rq))
500                                 resched_task(curr);
501                 }
502                 spin_unlock(&rt_rq->rt_runtime_lock);
503         }
504 }
505
506 static inline
507 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
508 {
509         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
510         rt_rq->rt_nr_running++;
511 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
512         if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
513 #ifdef CONFIG_SMP
514                 struct rq *rq = rq_of_rt_rq(rt_rq);
515 #endif
516
517                 rt_rq->highest_prio = rt_se_prio(rt_se);
518 #ifdef CONFIG_SMP
519                 if (rq->online)
520                         cpupri_set(&rq->rd->cpupri, rq->cpu,
521                                    rt_se_prio(rt_se));
522 #endif
523         }
524 #endif
525 #ifdef CONFIG_SMP
526         if (rt_se->nr_cpus_allowed > 1) {
527                 struct rq *rq = rq_of_rt_rq(rt_rq);
528
529                 rq->rt.rt_nr_migratory++;
530         }
531
532         update_rt_migration(rq_of_rt_rq(rt_rq));
533 #endif
534 #ifdef CONFIG_RT_GROUP_SCHED
535         if (rt_se_boosted(rt_se))
536                 rt_rq->rt_nr_boosted++;
537
538         if (rt_rq->tg)
539                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
540 #else
541         start_rt_bandwidth(&def_rt_bandwidth);
542 #endif
543 }
544
545 static inline
546 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
547 {
548 #ifdef CONFIG_SMP
549         int highest_prio = rt_rq->highest_prio;
550 #endif
551
552         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
553         WARN_ON(!rt_rq->rt_nr_running);
554         rt_rq->rt_nr_running--;
555 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
556         if (rt_rq->rt_nr_running) {
557                 struct rt_prio_array *array;
558
559                 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
560                 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
561                         /* recalculate */
562                         array = &rt_rq->active;
563                         rt_rq->highest_prio =
564                                 sched_find_first_bit(array->bitmap);
565                 } /* otherwise leave rt_rq->highest_prio alone */
566         } else
567                 rt_rq->highest_prio = MAX_RT_PRIO;
568 #endif
569 #ifdef CONFIG_SMP
570         if (rt_se->nr_cpus_allowed > 1) {
571                 struct rq *rq = rq_of_rt_rq(rt_rq);
572                 rq->rt.rt_nr_migratory--;
573         }
574
575         if (rt_rq->highest_prio != highest_prio) {
576                 struct rq *rq = rq_of_rt_rq(rt_rq);
577
578                 if (rq->online)
579                         cpupri_set(&rq->rd->cpupri, rq->cpu,
580                                    rt_rq->highest_prio);
581         }
582
583         update_rt_migration(rq_of_rt_rq(rt_rq));
584 #endif /* CONFIG_SMP */
585 #ifdef CONFIG_RT_GROUP_SCHED
586         if (rt_se_boosted(rt_se))
587                 rt_rq->rt_nr_boosted--;
588
589         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
590 #endif
591 }
592
593 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
594 {
595         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
596         struct rt_prio_array *array = &rt_rq->active;
597         struct rt_rq *group_rq = group_rt_rq(rt_se);
598         struct list_head *queue = array->queue + rt_se_prio(rt_se);
599
600         /*
601          * Don't enqueue the group if it's throttled, or when empty.
602          * The latter is a consequence of the former when a child group
603          * gets throttled and the current group doesn't have any other
604          * active members.
605          */
606         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
607                 return;
608
609         list_add_tail(&rt_se->run_list, queue);
610         __set_bit(rt_se_prio(rt_se), array->bitmap);
611
612         inc_rt_tasks(rt_se, rt_rq);
613 }
614
615 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
616 {
617         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
618         struct rt_prio_array *array = &rt_rq->active;
619
620         list_del_init(&rt_se->run_list);
621         if (list_empty(array->queue + rt_se_prio(rt_se)))
622                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
623
624         dec_rt_tasks(rt_se, rt_rq);
625 }
626
627 /*
628  * Because the prio of an upper entry depends on the lower
629  * entries, we must remove entries top-down.
630  */
631 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
632 {
633         struct sched_rt_entity *back = NULL;
634
635         for_each_sched_rt_entity(rt_se) {
636                 rt_se->back = back;
637                 back = rt_se;
638         }
639
640         for (rt_se = back; rt_se; rt_se = rt_se->back) {
641                 if (on_rt_rq(rt_se))
642                         __dequeue_rt_entity(rt_se);
643         }
644 }
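
/*
 * Example with one level of nesting: for a task T in group G the first
 * loop sets T's entity ->back to NULL and G's entity ->back to &T->rt,
 * leaving 'back' pointing at G's entity. The second loop then dequeues
 * G's entity from the root rt_rq before dequeueing T from G's rt_rq,
 * i.e. strictly top-down.
 */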
645
646 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
647 {
648         dequeue_rt_stack(rt_se);
649         for_each_sched_rt_entity(rt_se)
650                 __enqueue_rt_entity(rt_se);
651 }
652
653 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
654 {
655         dequeue_rt_stack(rt_se);
656
657         for_each_sched_rt_entity(rt_se) {
658                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
659
660                 if (rt_rq && rt_rq->rt_nr_running)
661                         __enqueue_rt_entity(rt_se);
662         }
663 }
664
665 /*
666  * Adding/removing a task to/from a priority array:
667  */
668 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
669 {
670         struct sched_rt_entity *rt_se = &p->rt;
671
672         if (wakeup)
673                 rt_se->timeout = 0;
674
675         enqueue_rt_entity(rt_se);
676
677         inc_cpu_load(rq, p->se.load.weight);
678 }
679
680 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
681 {
682         struct sched_rt_entity *rt_se = &p->rt;
683
684         update_curr_rt(rq);
685         dequeue_rt_entity(rt_se);
686
687         dec_cpu_load(rq, p->se.load.weight);
688 }
689
690 /*
691  * Put task to the end of the run list without the overhead of dequeue
692  * followed by enqueue.
693  */
694 static void
695 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
696 {
697         if (on_rt_rq(rt_se)) {
698                 struct rt_prio_array *array = &rt_rq->active;
699                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
700
701                 if (head)
702                         list_move(&rt_se->run_list, queue);
703                 else
704                         list_move_tail(&rt_se->run_list, queue);
705         }
706 }
707
708 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
709 {
710         struct sched_rt_entity *rt_se = &p->rt;
711         struct rt_rq *rt_rq;
712
713         for_each_sched_rt_entity(rt_se) {
714                 rt_rq = rt_rq_of_se(rt_se);
715                 requeue_rt_entity(rt_rq, rt_se, head);
716         }
717 }
718
719 static void yield_task_rt(struct rq *rq)
720 {
721         requeue_task_rt(rq, rq->curr, 0);
722 }
723
724 #ifdef CONFIG_SMP
725 static int find_lowest_rq(struct task_struct *task);
726
727 static int select_task_rq_rt(struct task_struct *p, int sync)
728 {
729         struct rq *rq = task_rq(p);
730
731         /*
732          * If the current task is an RT task, then
733          * try to see if we can wake this RT task up on another
734          * runqueue. Otherwise simply start this RT task
735          * on its current runqueue.
736          *
737          * We want to avoid overloading runqueues, even if the woken
738          * RT task is of higher priority than the current RT task.
739          * RT tasks behave differently from other tasks: if one gets
740          * preempted, we try to push it off to another queue. So
741          * trying to keep a preempting RT task on the same
742          * cache-hot CPU will force the running RT task onto
743          * a cold CPU. We would waste all the cache of the lower
744          * RT task in the hope of saving some cache for an RT task
745          * that is just being woken and will probably have a
746          * cold cache anyway.
747          */
748         if (unlikely(rt_task(rq->curr)) &&
749             (p->rt.nr_cpus_allowed > 1)) {
750                 int cpu = find_lowest_rq(p);
751
752                 return (cpu == -1) ? task_cpu(p) : cpu;
753         }
754
755         /*
756          * Otherwise, just let it ride on the affined RQ and the
757          * post-schedule router will push the preempted task away
758          */
759         return task_cpu(p);
760 }
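
/*
 * Placement example: CPU0 runs a prio-10 FIFO task and a prio-5 FIFO
 * task with a wide affinity mask wakes up targeting CPU0. Rather than
 * preempting (and later having to push the prio-10 task to a cache-cold
 * CPU), find_lowest_rq() may select an idle or CFS-only CPU, and the
 * task is woken there instead.
 */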
761
762 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
763 {
764         cpumask_t mask;
765
766         if (rq->curr->rt.nr_cpus_allowed == 1)
767                 return;
768
769         if (p->rt.nr_cpus_allowed != 1
770             && cpupri_find(&rq->rd->cpupri, p, &mask))
771                 return;
772
773         if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
774                 return;
775
776         /*
777          * There appear to be other CPUs that can accept
778          * current and none to run 'p', so let's reschedule
779          * to try and push current away:
780          */
781         requeue_task_rt(rq, p, 1);
782         resched_task(rq->curr);
783 }
784
785 #endif /* CONFIG_SMP */
786
787 /*
788  * Preempt the current task with a newly woken task if needed:
789  */
790 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
791 {
792         if (p->prio < rq->curr->prio) {
793                 resched_task(rq->curr);
794                 return;
795         }
796
797 #ifdef CONFIG_SMP
798         /*
799          * If:
800          *
801          * - the newly woken task is of equal priority to the current task
802          * - the newly woken task is non-migratable while current is migratable
803          * - current will be preempted on the next reschedule
804          *
805          * we should check to see if current can readily move to a different
806          * cpu.  If so, we will reschedule to allow the push logic to try
807          * to move current somewhere else, making room for our non-migratable
808          * task.
809          */
810         if (p->prio == rq->curr->prio && !need_resched())
811                 check_preempt_equal_prio(rq, p);
812 #endif
813 }
814
815 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
816                                                    struct rt_rq *rt_rq)
817 {
818         struct rt_prio_array *array = &rt_rq->active;
819         struct sched_rt_entity *next = NULL;
820         struct list_head *queue;
821         int idx;
822
823         idx = sched_find_first_bit(array->bitmap);
824         BUG_ON(idx >= MAX_RT_PRIO);
825
826         queue = array->queue + idx;
827         next = list_entry(queue->next, struct sched_rt_entity, run_list);
828
829         return next;
830 }
831
832 static struct task_struct *pick_next_task_rt(struct rq *rq)
833 {
834         struct sched_rt_entity *rt_se;
835         struct task_struct *p;
836         struct rt_rq *rt_rq;
837
838         rt_rq = &rq->rt;
839
840         if (unlikely(!rt_rq->rt_nr_running))
841                 return NULL;
842
843         if (rt_rq_throttled(rt_rq))
844                 return NULL;
845
846         do {
847                 rt_se = pick_next_rt_entity(rq, rt_rq);
848                 BUG_ON(!rt_se);
849                 rt_rq = group_rt_rq(rt_se);
850         } while (rt_rq);
851
852         p = rt_task_of(rt_se);
853         p->se.exec_start = rq->clock;
854         return p;
855 }
856
857 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
858 {
859         update_curr_rt(rq);
860         p->se.exec_start = 0;
861 }
862
863 #ifdef CONFIG_SMP
864
865 /* Only try algorithms three times */
866 #define RT_MAX_TRIES 3
867
868 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
869 static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
870
871 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
872
873 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
874 {
875         if (!task_running(rq, p) &&
876             (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
877             (p->rt.nr_cpus_allowed > 1))
878                 return 1;
879         return 0;
880 }
881
882 /* Return the second highest RT task, NULL otherwise */
883 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
884 {
885         struct task_struct *next = NULL;
886         struct sched_rt_entity *rt_se;
887         struct rt_prio_array *array;
888         struct rt_rq *rt_rq;
889         int idx;
890
891         for_each_leaf_rt_rq(rt_rq, rq) {
892                 array = &rt_rq->active;
893                 idx = sched_find_first_bit(array->bitmap);
894  next_idx:
895                 if (idx >= MAX_RT_PRIO)
896                         continue;
897                 if (next && next->prio < idx)
898                         continue;
899                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
900                         struct task_struct *p = rt_task_of(rt_se);
901                         if (pick_rt_task(rq, p, cpu)) {
902                                 next = p;
903                                 break;
904                         }
905                 }
906                 if (!next) {
907                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
908                         goto next_idx;
909                 }
910         }
911
912         return next;
913 }
914
915 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
916
917 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
918 {
919         int first;
920
921         /* "this_cpu" is cheaper to preempt than a remote processor */
922         if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
923                 return this_cpu;
924
925         first = first_cpu(*mask);
926         if (first != NR_CPUS)
927                 return first;
928
929         return -1;
930 }
931
932 static int find_lowest_rq(struct task_struct *task)
933 {
934         struct sched_domain *sd;
935         cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
936         int this_cpu = smp_processor_id();
937         int cpu      = task_cpu(task);
938
939         if (task->rt.nr_cpus_allowed == 1)
940                 return -1; /* No other targets possible */
941
942         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
943                 return -1; /* No targets found */
944
945         /*
946          * Only consider CPUs that are usable for migration.
947          * I guess we might want to change cpupri_find() to ignore those
948          * in the first place.
949          */
950         cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
951
952         /*
953          * At this point we have built a mask of cpus representing the
954          * lowest priority tasks in the system.  Now we want to elect
955          * the best one based on our affinity and topology.
956          *
957          * We prioritize the last cpu that the task executed on since
958          * it is most likely cache-hot in that location.
959          */
960         if (cpu_isset(cpu, *lowest_mask))
961                 return cpu;
962
963         /*
964          * Otherwise, we consult the sched_domains span maps to figure
965          * out which cpu is logically closest to our hot cache data.
966          */
967         if (this_cpu == cpu)
968                 this_cpu = -1; /* Skip this_cpu opt if the same */
969
970         for_each_domain(cpu, sd) {
971                 if (sd->flags & SD_WAKE_AFFINE) {
972                         cpumask_t domain_mask;
973                         int       best_cpu;
974
975                         cpus_and(domain_mask, sd->span, *lowest_mask);
976
977                         best_cpu = pick_optimal_cpu(this_cpu,
978                                                     &domain_mask);
979                         if (best_cpu != -1)
980                                 return best_cpu;
981                 }
982         }
983
984         /*
985          * And finally, if there were no matches within the domains
986          * just give the caller *something* to work with from the compatible
987          * locations.
988          */
989         return pick_optimal_cpu(this_cpu, lowest_mask);
990 }
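
/*
 * Preference order of the search above: the task's previous CPU when it
 * is in lowest_mask; this_cpu when it is both in lowest_mask and in a
 * SD_WAKE_AFFINE domain of the previous CPU; the first lowest_mask CPU
 * inside such a domain; then this_cpu or the first CPU in lowest_mask
 * regardless of topology; and -1 when nothing qualifies.
 */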
991
992 /* Will lock the rq it finds */
993 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
994 {
995         struct rq *lowest_rq = NULL;
996         int tries;
997         int cpu;
998
999         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1000                 cpu = find_lowest_rq(task);
1001
1002                 if ((cpu == -1) || (cpu == rq->cpu))
1003                         break;
1004
1005                 lowest_rq = cpu_rq(cpu);
1006
1007                 /* if the prio of this runqueue changed, try again */
1008                 if (double_lock_balance(rq, lowest_rq)) {
1009                         /*
1010                          * We had to unlock the run queue. In
1011                          * the meantime, the task could have
1012                          * migrated already or had its affinity changed.
1013                          * Also make sure that it wasn't scheduled on its rq.
1014                          */
1015                         if (unlikely(task_rq(task) != rq ||
1016                                      !cpu_isset(lowest_rq->cpu,
1017                                                 task->cpus_allowed) ||
1018                                      task_running(rq, task) ||
1019                                      !task->se.on_rq)) {
1020
1021                                 spin_unlock(&lowest_rq->lock);
1022                                 lowest_rq = NULL;
1023                                 break;
1024                         }
1025                 }
1026
1027                 /* If this rq is still suitable use it. */
1028                 if (lowest_rq->rt.highest_prio > task->prio)
1029                         break;
1030
1031                 /* try again */
1032                 double_unlock_balance(rq, lowest_rq);
1033                 lowest_rq = NULL;
1034         }
1035
1036         return lowest_rq;
1037 }
1038
1039 /*
1040  * If the current CPU has more than one RT task, see if the
1041  * non-running task can migrate over to a CPU that is running a task
1042  * of lesser priority.
1043  */
1044 static int push_rt_task(struct rq *rq)
1045 {
1046         struct task_struct *next_task;
1047         struct rq *lowest_rq;
1048         int ret = 0;
1049         int paranoid = RT_MAX_TRIES;
1050
1051         if (!rq->rt.overloaded)
1052                 return 0;
1053
1054         next_task = pick_next_highest_task_rt(rq, -1);
1055         if (!next_task)
1056                 return 0;
1057
1058  retry:
1059         if (unlikely(next_task == rq->curr)) {
1060                 WARN_ON(1);
1061                 return 0;
1062         }
1063
1064         /*
1065          * It's possible that the next_task slipped in with a
1066          * higher priority than current. If that's the case,
1067          * just reschedule current.
1068          */
1069         if (unlikely(next_task->prio < rq->curr->prio)) {
1070                 resched_task(rq->curr);
1071                 return 0;
1072         }
1073
1074         /* We might release rq lock */
1075         get_task_struct(next_task);
1076
1077         /* find_lock_lowest_rq locks the rq if found */
1078         lowest_rq = find_lock_lowest_rq(next_task, rq);
1079         if (!lowest_rq) {
1080                 struct task_struct *task;
1081                 /*
1082                  * find_lock_lowest_rq releases rq->lock
1083                  * so it is possible that next_task has changed.
1084                  * If it has, then try again.
1085                  */
1086                 task = pick_next_highest_task_rt(rq, -1);
1087                 if (unlikely(task != next_task) && task && paranoid--) {
1088                         put_task_struct(next_task);
1089                         next_task = task;
1090                         goto retry;
1091                 }
1092                 goto out;
1093         }
1094
1095         deactivate_task(rq, next_task, 0);
1096         set_task_cpu(next_task, lowest_rq->cpu);
1097         activate_task(lowest_rq, next_task, 0);
1098
1099         resched_task(lowest_rq->curr);
1100
1101         double_unlock_balance(rq, lowest_rq);
1102
1103         ret = 1;
1104 out:
1105         put_task_struct(next_task);
1106
1107         return ret;
1108 }
1109
1110 /*
1111  * TODO: Currently we just use the second highest prio task on
1112  *       the queue, and stop when it can't migrate (or there's
1113  *       no more RT tasks).  There may be a case where a lower
1114  *       priority RT task has a different affinity than the
1115  *       higher RT task. In this case the lower RT task could
1116  *       possibly be able to migrate whereas the higher priority
1117  *       RT task could not.  We currently ignore this issue.
1118  *       Enhancements are welcome!
1119  */
1120 static void push_rt_tasks(struct rq *rq)
1121 {
1122         /* push_rt_task will return true if it moved an RT */
1123         while (push_rt_task(rq))
1124                 ;
1125 }
1126
1127 static int pull_rt_task(struct rq *this_rq)
1128 {
1129         int this_cpu = this_rq->cpu, ret = 0, cpu;
1130         struct task_struct *p, *next;
1131         struct rq *src_rq;
1132
1133         if (likely(!rt_overloaded(this_rq)))
1134                 return 0;
1135
1136         next = pick_next_task_rt(this_rq);
1137
1138         for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
1139                 if (this_cpu == cpu)
1140                         continue;
1141
1142                 src_rq = cpu_rq(cpu);
1143                 /*
1144                  * We can potentially drop this_rq's lock in
1145                  * double_lock_balance, and another CPU could
1146                  * steal our next task - hence we must cause
1147                  * the caller to recalculate the next task
1148                  * in that case:
1149                  */
1150                 if (double_lock_balance(this_rq, src_rq)) {
1151                         struct task_struct *old_next = next;
1152
1153                         next = pick_next_task_rt(this_rq);
1154                         if (next != old_next)
1155                                 ret = 1;
1156                 }
1157
1158                 /*
1159                  * Are there still pullable RT tasks?
1160                  */
1161                 if (src_rq->rt.rt_nr_running <= 1)
1162                         goto skip;
1163
1164                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1165
1166                 /*
1167                  * Do we have an RT task that preempts
1168                  * the to-be-scheduled task?
1169                  */
1170                 if (p && (!next || (p->prio < next->prio))) {
1171                         WARN_ON(p == src_rq->curr);
1172                         WARN_ON(!p->se.on_rq);
1173
1174                         /*
1175                          * There's a chance that p is higher in priority
1176                          * than what's currently running on its cpu.
1177                          * This is just because p is waking up and hasn't
1178                          * had a chance to schedule. We only pull
1179                          * p if it is lower in priority than the
1180                          * current task on the run queue or
1181                          * this_rq's next task is lower in prio than
1182                          * the current task on that rq.
1183                          */
1184                         if (p->prio < src_rq->curr->prio ||
1185                             (next && next->prio < src_rq->curr->prio))
1186                                 goto skip;
1187
1188                         ret = 1;
1189
1190                         deactivate_task(src_rq, p, 0);
1191                         set_task_cpu(p, this_cpu);
1192                         activate_task(this_rq, p, 0);
1193                         /*
1194                          * We continue with the search, just in
1195                          * case there's an even higher prio task
1196                          * in another runqueue. (low likelihood
1197                          * but possible)
1198                          *
1199                          * Update next so that we won't pick a task
1200                          * on another cpu with a priority lower (or equal)
1201                          * than the one we just picked.
1202                          */
1203                         next = p;
1204
1205                 }
1206  skip:
1207                 double_unlock_balance(this_rq, src_rq);
1208         }
1209
1210         return ret;
1211 }
1212
1213 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1214 {
1215         /* Try to pull RT tasks here if we lower this rq's prio */
1216         if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
1217                 pull_rt_task(rq);
1218 }
1219
1220 static void post_schedule_rt(struct rq *rq)
1221 {
1222         /*
1223          * If we have more than one rt_task queued, then
1224          * see if we can push the other rt_tasks off to other CPUs.
1225          * Note we may release the rq lock, and since
1226          * the lock was owned by prev, we need to release it
1227          * first via finish_lock_switch and then reacquire it here.
1228          */
1229         if (unlikely(rq->rt.overloaded)) {
1230                 spin_lock_irq(&rq->lock);
1231                 push_rt_tasks(rq);
1232                 spin_unlock_irq(&rq->lock);
1233         }
1234 }
1235
1236 /*
1237  * If we are not running and we are not going to reschedule soon, we should
1238  * try to push tasks away now
1239  */
1240 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1241 {
1242         if (!task_running(rq, p) &&
1243             !test_tsk_need_resched(rq->curr) &&
1244             rq->rt.overloaded)
1245                 push_rt_tasks(rq);
1246 }
1247
1248 static unsigned long
1249 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1250                 unsigned long max_load_move,
1251                 struct sched_domain *sd, enum cpu_idle_type idle,
1252                 int *all_pinned, int *this_best_prio)
1253 {
1254         /* don't touch RT tasks */
1255         return 0;
1256 }
1257
1258 static int
1259 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1260                  struct sched_domain *sd, enum cpu_idle_type idle)
1261 {
1262         /* don't touch RT tasks */
1263         return 0;
1264 }
1265
1266 static void set_cpus_allowed_rt(struct task_struct *p,
1267                                 const cpumask_t *new_mask)
1268 {
1269         int weight = cpus_weight(*new_mask);
1270
1271         BUG_ON(!rt_task(p));
1272
1273         /*
1274          * Update the migration status of the RQ if we have an RT task
1275          * which is running AND changing its weight value.
1276          */
1277         if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1278                 struct rq *rq = task_rq(p);
1279
1280                 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1281                         rq->rt.rt_nr_migratory++;
1282                 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1283                         BUG_ON(!rq->rt.rt_nr_migratory);
1284                         rq->rt.rt_nr_migratory--;
1285                 }
1286
1287                 update_rt_migration(rq);
1288         }
1289
1290         p->cpus_allowed    = *new_mask;
1291         p->rt.nr_cpus_allowed = weight;
1292 }
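
/*
 * Userspace sketch (illustrative): this path runs when an RT task's
 * affinity mask changes, e.g.:
 *
 *	cpu_set_t set;
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	sched_setaffinity(pid, sizeof(set), &set);
 *
 * Pinning a previously migratable RT task to a single CPU decrements
 * rt_nr_migratory and may clear the runqueue's overload state through
 * update_rt_migration() above.
 */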
1293
1294 /* Assumes rq->lock is held */
1295 static void rq_online_rt(struct rq *rq)
1296 {
1297         if (rq->rt.overloaded)
1298                 rt_set_overload(rq);
1299
1300         __enable_runtime(rq);
1301
1302         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
1303 }
1304
1305 /* Assumes rq->lock is held */
1306 static void rq_offline_rt(struct rq *rq)
1307 {
1308         if (rq->rt.overloaded)
1309                 rt_clear_overload(rq);
1310
1311         __disable_runtime(rq);
1312
1313         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1314 }
1315
1316 /*
1317  * When switching away from the rt queue, we bring ourselves to a position
1318  * where we might want to pull RT tasks from other runqueues.
1319  */
1320 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1321                            int running)
1322 {
1323         /*
1324          * If there are other RT tasks then we will reschedule
1325          * and the scheduling of the other RT tasks will handle
1326          * the balancing. But if we are the last RT task
1327          * we may need to handle the pulling of RT tasks
1328          * now.
1329          */
1330         if (!rq->rt.rt_nr_running)
1331                 pull_rt_task(rq);
1332 }
1333 #endif /* CONFIG_SMP */
1334
1335 /*
1336  * When switching a task to RT, we may overload the runqueue
1337  * with RT tasks. In this case we try to push them off to
1338  * other runqueues.
1339  */
1340 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1341                            int running)
1342 {
1343         int check_resched = 1;
1344
1345         /*
1346          * If we are already running, then there's nothing
1347          * that needs to be done. But if we are not running,
1348          * we may need to preempt the currently running task.
1349          * If that currently running task is also an RT task,
1350          * then see if we can move to another run queue.
1351          */
1352         if (!running) {
1353 #ifdef CONFIG_SMP
1354                 if (rq->rt.overloaded && push_rt_task(rq) &&
1355                     /* Don't resched if we changed runqueues */
1356                     rq != task_rq(p))
1357                         check_resched = 0;
1358 #endif /* CONFIG_SMP */
1359                 if (check_resched && p->prio < rq->curr->prio)
1360                         resched_task(rq->curr);
1361         }
1362 }
1363
1364 /*
1365  * Priority of the task has changed. This may cause
1366  * us to initiate a push or pull.
1367  */
1368 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1369                             int oldprio, int running)
1370 {
1371         if (running) {
1372 #ifdef CONFIG_SMP
1373                 /*
1374                  * If our priority decreases while running, we
1375                  * may need to pull tasks to this runqueue.
1376                  */
1377                 if (oldprio < p->prio)
1378                         pull_rt_task(rq);
1379                 /*
1380                  * If there's a higher priority task waiting to run
1381                  * then reschedule. Note, the above pull_rt_task
1382                  * can release the rq lock and p could migrate.
1383                  * Only reschedule if p is still on the same runqueue.
1384                  */
1385                 if (p->prio > rq->rt.highest_prio && rq->curr == p)
1386                         resched_task(p);
1387 #else
1388                 /* For UP simply resched on drop of prio */
1389                 if (oldprio < p->prio)
1390                         resched_task(p);
1391 #endif /* CONFIG_SMP */
1392         } else {
1393                 /*
1394                  * This task is not running, but if its priority is
1395                  * higher than that of the currently running task,
1396                  * then reschedule.
1397                  */
1398                 if (p->prio < rq->curr->prio)
1399                         resched_task(rq->curr);
1400         }
1401 }
1402
1403 static void watchdog(struct rq *rq, struct task_struct *p)
1404 {
1405         unsigned long soft, hard;
1406
1407         if (!p->signal)
1408                 return;
1409
1410         soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1411         hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1412
1413         if (soft != RLIM_INFINITY) {
1414                 unsigned long next;
1415
1416                 p->rt.timeout++;
1417                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1418                 if (p->rt.timeout > next)
1419                         p->it_sched_expires = p->se.sum_exec_runtime;
1420         }
1421 }
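
/*
 * Userspace sketch (illustrative): the soft and hard limits consumed
 * above come from RLIMIT_RTTIME, expressed in microseconds of CPU time
 * used without sleeping, e.g.:
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *	setrlimit(RLIMIT_RTTIME, &rl);
 *
 * Once the accumulated timeout crosses the soft limit, the posix CPU
 * timer path delivers SIGXCPU; reaching the hard limit kills the task.
 */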
1422
1423 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1424 {
1425         update_curr_rt(rq);
1426
1427         watchdog(rq, p);
1428
1429         /*
1430          * RR tasks need a special form of timeslice management.
1431          * FIFO tasks have no timeslices.
1432          */
1433         if (p->policy != SCHED_RR)
1434                 return;
1435
1436         if (--p->rt.time_slice)
1437                 return;
1438
1439         p->rt.time_slice = DEF_TIMESLICE;
1440
1441         /*
1442          * Requeue to the end of the queue if we are not the only element
1443          * on the queue:
1444          */
1445         if (p->rt.run_list.prev != p->rt.run_list.next) {
1446                 requeue_task_rt(rq, p, 0);
1447                 set_tsk_need_resched(p);
1448         }
1449 }
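
/*
 * Userspace sketch (illustrative; DEF_TIMESLICE is assumed to be the
 * usual 100ms worth of ticks): the RR quantum refilled above can be
 * queried with:
 *
 *	struct timespec ts;
 *	sched_rr_get_interval(pid, &ts);
 *
 * SCHED_FIFO tasks report a zero interval and, as the check above shows,
 * are never requeued on the tick.
 */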
1450
1451 static void set_curr_task_rt(struct rq *rq)
1452 {
1453         struct task_struct *p = rq->curr;
1454
1455         p->se.exec_start = rq->clock;
1456 }
1457
1458 static const struct sched_class rt_sched_class = {
1459         .next                   = &fair_sched_class,
1460         .enqueue_task           = enqueue_task_rt,
1461         .dequeue_task           = dequeue_task_rt,
1462         .yield_task             = yield_task_rt,
1463 #ifdef CONFIG_SMP
1464         .select_task_rq         = select_task_rq_rt,
1465 #endif /* CONFIG_SMP */
1466
1467         .check_preempt_curr     = check_preempt_curr_rt,
1468
1469         .pick_next_task         = pick_next_task_rt,
1470         .put_prev_task          = put_prev_task_rt,
1471
1472 #ifdef CONFIG_SMP
1473         .load_balance           = load_balance_rt,
1474         .move_one_task          = move_one_task_rt,
1475         .set_cpus_allowed       = set_cpus_allowed_rt,
1476         .rq_online              = rq_online_rt,
1477         .rq_offline             = rq_offline_rt,
1478         .pre_schedule           = pre_schedule_rt,
1479         .post_schedule          = post_schedule_rt,
1480         .task_wake_up           = task_wake_up_rt,
1481         .switched_from          = switched_from_rt,
1482 #endif
1483
1484         .set_curr_task          = set_curr_task_rt,
1485         .task_tick              = task_tick_rt,
1486
1487         .prio_changed           = prio_changed_rt,
1488         .switched_to            = switched_to_rt,
1489 };
1490
1491 #ifdef CONFIG_SCHED_DEBUG
1492 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1493
1494 static void print_rt_stats(struct seq_file *m, int cpu)
1495 {
1496         struct rt_rq *rt_rq;
1497
1498         rcu_read_lock();
1499         for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1500                 print_rt_rq(m, cpu, rt_rq);
1501         rcu_read_unlock();
1502 }
1503 #endif /* CONFIG_SCHED_DEBUG */