sched: Add debug check to task_of()
kernel/sched_rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #ifdef CONFIG_RT_GROUP_SCHED
7
8 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
9
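   /*
    * With CONFIG_RT_GROUP_SCHED an rt_se may represent a whole group
    * (my_q != NULL) rather than a task; warn once (CONFIG_SCHED_DEBUG
    * only) before container_of() is applied to a non-task entity.
    */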
10 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
11 {
12 #ifdef CONFIG_SCHED_DEBUG
13         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
14 #endif
15         return container_of(rt_se, struct task_struct, rt);
16 }
17
18 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
19 {
20         return rt_rq->rq;
21 }
22
23 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
24 {
25         return rt_se->rt_rq;
26 }
27
28 #else /* CONFIG_RT_GROUP_SCHED */
29
30 #define rt_entity_is_task(rt_se) (1)
31
32 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
33 {
34         return container_of(rt_se, struct task_struct, rt);
35 }
36
37 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
38 {
39         return container_of(rt_rq, struct rq, rt);
40 }
41
42 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
43 {
44         struct task_struct *p = rt_task_of(rt_se);
45         struct rq *rq = task_rq(p);
46
47         return &rq->rt;
48 }
49
50 #endif /* CONFIG_RT_GROUP_SCHED */
51
52 #ifdef CONFIG_SMP
53
54 static inline int rt_overloaded(struct rq *rq)
55 {
56         return atomic_read(&rq->rd->rto_count);
57 }
58
59 static inline void rt_set_overload(struct rq *rq)
60 {
61         if (!rq->online)
62                 return;
63
64         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
65         /*
66          * Make sure the mask is visible before we set
67          * the overload count. That is checked to determine
68          * if we should look at the mask. It would be a shame
69          * if we looked at the mask, but the mask was not
70          * updated yet.
71          */
72         wmb();
73         atomic_inc(&rq->rd->rto_count);
74 }
75
76 static inline void rt_clear_overload(struct rq *rq)
77 {
78         if (!rq->online)
79                 return;
80
81         /* the order here really doesn't matter */
82         atomic_dec(&rq->rd->rto_count);
83         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
84 }
85
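   /*
    * An rq counts as RT-overloaded when it has more than one RT task
    * queued and at least one of them may migrate; keep rt_rq->overloaded
    * and the root-domain overload mask in sync with that condition.
    */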
86 static void update_rt_migration(struct rt_rq *rt_rq)
87 {
88         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
89                 if (!rt_rq->overloaded) {
90                         rt_set_overload(rq_of_rt_rq(rt_rq));
91                         rt_rq->overloaded = 1;
92                 }
93         } else if (rt_rq->overloaded) {
94                 rt_clear_overload(rq_of_rt_rq(rt_rq));
95                 rt_rq->overloaded = 0;
96         }
97 }
98
99 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
100 {
101         if (!rt_entity_is_task(rt_se))
102                 return;
103
104         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
105
106         rt_rq->rt_nr_total++;
107         if (rt_se->nr_cpus_allowed > 1)
108                 rt_rq->rt_nr_migratory++;
109
110         update_rt_migration(rt_rq);
111 }
112
113 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
114 {
115         if (!rt_entity_is_task(rt_se))
116                 return;
117
118         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
119
120         rt_rq->rt_nr_total--;
121         if (rt_se->nr_cpus_allowed > 1)
122                 rt_rq->rt_nr_migratory--;
123
124         update_rt_migration(rt_rq);
125 }
126
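    /*
     * rq->rt.pushable_tasks is a priority-sorted plist; re-initialising
     * the node with the task's current prio before re-adding keeps the
     * list ordered even after a priority change.
     */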
127 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
128 {
129         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
130         plist_node_init(&p->pushable_tasks, p->prio);
131         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
132 }
133
134 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
135 {
136         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
137 }
138
139 #else
140
141 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
142 {
143 }
144
145 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
146 {
147 }
148
149 static inline
150 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
151 {
152 }
153
154 static inline
155 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
156 {
157 }
158
159 #endif /* CONFIG_SMP */
160
161 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
162 {
163         return !list_empty(&rt_se->run_list);
164 }
165
166 #ifdef CONFIG_RT_GROUP_SCHED
167
168 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
169 {
170         if (!rt_rq->tg)
171                 return RUNTIME_INF;
172
173         return rt_rq->rt_runtime;
174 }
175
176 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
177 {
178         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
179 }
180
181 #define for_each_leaf_rt_rq(rt_rq, rq) \
182         list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
183
184 #define for_each_sched_rt_entity(rt_se) \
185         for (; rt_se; rt_se = rt_se->parent)
186
187 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
188 {
189         return rt_se->my_q;
190 }
191
192 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
193 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
194
195 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
196 {
197         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
198         struct sched_rt_entity *rt_se = rt_rq->rt_se;
199
200         if (rt_rq->rt_nr_running) {
201                 if (rt_se && !on_rt_rq(rt_se))
202                         enqueue_rt_entity(rt_se);
203                 if (rt_rq->highest_prio.curr < curr->prio)
204                         resched_task(curr);
205         }
206 }
207
208 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
209 {
210         struct sched_rt_entity *rt_se = rt_rq->rt_se;
211
212         if (rt_se && on_rt_rq(rt_se))
213                 dequeue_rt_entity(rt_se);
214 }
215
216 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
217 {
218         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
219 }
220
221 static int rt_se_boosted(struct sched_rt_entity *rt_se)
222 {
223         struct rt_rq *rt_rq = group_rt_rq(rt_se);
224         struct task_struct *p;
225
226         if (rt_rq)
227                 return !!rt_rq->rt_nr_boosted;
228
229         p = rt_task_of(rt_se);
230         return p->prio != p->normal_prio;
231 }
232
233 #ifdef CONFIG_SMP
234 static inline const struct cpumask *sched_rt_period_mask(void)
235 {
236         return cpu_rq(smp_processor_id())->rd->span;
237 }
238 #else
239 static inline const struct cpumask *sched_rt_period_mask(void)
240 {
241         return cpu_online_mask;
242 }
243 #endif
244
245 static inline
246 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
247 {
248         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
249 }
250
251 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
252 {
253         return &rt_rq->tg->rt_bandwidth;
254 }
255
256 #else /* !CONFIG_RT_GROUP_SCHED */
257
258 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
259 {
260         return rt_rq->rt_runtime;
261 }
262
263 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
264 {
265         return ktime_to_ns(def_rt_bandwidth.rt_period);
266 }
267
268 #define for_each_leaf_rt_rq(rt_rq, rq) \
269         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
270
271 #define for_each_sched_rt_entity(rt_se) \
272         for (; rt_se; rt_se = NULL)
273
274 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
275 {
276         return NULL;
277 }
278
279 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
280 {
281         if (rt_rq->rt_nr_running)
282                 resched_task(rq_of_rt_rq(rt_rq)->curr);
283 }
284
285 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
286 {
287 }
288
289 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
290 {
291         return rt_rq->rt_throttled;
292 }
293
294 static inline const struct cpumask *sched_rt_period_mask(void)
295 {
296         return cpu_online_mask;
297 }
298
299 static inline
300 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
301 {
302         return &cpu_rq(cpu)->rt;
303 }
304
305 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
306 {
307         return &def_rt_bandwidth;
308 }
309
310 #endif /* CONFIG_RT_GROUP_SCHED */
311
312 #ifdef CONFIG_SMP
313 /*
314  * We ran out of runtime, see if we can borrow some from our neighbours.
315  */
316 static int do_balance_runtime(struct rt_rq *rt_rq)
317 {
318         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
319         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
320         int i, weight, more = 0;
321         u64 rt_period;
322
323         weight = cpumask_weight(rd->span);
324
325         spin_lock(&rt_b->rt_runtime_lock);
326         rt_period = ktime_to_ns(rt_b->rt_period);
327         for_each_cpu(i, rd->span) {
328                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
329                 s64 diff;
330
331                 if (iter == rt_rq)
332                         continue;
333
334                 spin_lock(&iter->rt_runtime_lock);
335                 /*
336                  * Either all rqs have inf runtime and there's nothing to steal
337                  * or __disable_runtime() below sets a specific rq to inf to
338                  * indicate it's been disabled and disallow stealing.
339                  */
340                 if (iter->rt_runtime == RUNTIME_INF)
341                         goto next;
342
343                 /*
344                  * From runqueues with spare time, take 1/n part of their
345                  * spare time, but no more than our period.
346                  */
347                 diff = iter->rt_runtime - iter->rt_time;
348                 if (diff > 0) {
349                         diff = div_u64((u64)diff, weight);
350                         if (rt_rq->rt_runtime + diff > rt_period)
351                                 diff = rt_period - rt_rq->rt_runtime;
352                         iter->rt_runtime -= diff;
353                         rt_rq->rt_runtime += diff;
354                         more = 1;
355                         if (rt_rq->rt_runtime == rt_period) {
356                                 spin_unlock(&iter->rt_runtime_lock);
357                                 break;
358                         }
359                 }
360 next:
361                 spin_unlock(&iter->rt_runtime_lock);
362         }
363         spin_unlock(&rt_b->rt_runtime_lock);
364
365         return more;
366 }
367
368 /*
369  * Ensure this RQ takes back all the runtime it lent to its neighbours.
370  */
371 static void __disable_runtime(struct rq *rq)
372 {
373         struct root_domain *rd = rq->rd;
374         struct rt_rq *rt_rq;
375
376         if (unlikely(!scheduler_running))
377                 return;
378
379         for_each_leaf_rt_rq(rt_rq, rq) {
380                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
381                 s64 want;
382                 int i;
383
384                 spin_lock(&rt_b->rt_runtime_lock);
385                 spin_lock(&rt_rq->rt_runtime_lock);
386                 /*
387                  * Either we're all inf and nobody needs to borrow, or we're
388                  * already disabled and thus have nothing to do, or we have
389                  * exactly the right amount of runtime to take out.
390                  */
391                 if (rt_rq->rt_runtime == RUNTIME_INF ||
392                                 rt_rq->rt_runtime == rt_b->rt_runtime)
393                         goto balanced;
394                 spin_unlock(&rt_rq->rt_runtime_lock);
395
396                 /*
397                  * Calculate the difference between what we started out with
398                  * and what we currently have, that's the amount of runtime
399                  * we lent and now have to reclaim.
400                  */
401                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
402
403                 /*
404                  * Greedy reclaim, take back as much as we can.
405                  */
406                 for_each_cpu(i, rd->span) {
407                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
408                         s64 diff;
409
410                         /*
411                          * Can't reclaim from ourselves or disabled runqueues.
412                          */
413                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
414                                 continue;
415
416                         spin_lock(&iter->rt_runtime_lock);
417                         if (want > 0) {
418                                 diff = min_t(s64, iter->rt_runtime, want);
419                                 iter->rt_runtime -= diff;
420                                 want -= diff;
421                         } else {
422                                 iter->rt_runtime -= want;
423                                 want -= want;
424                         }
425                         spin_unlock(&iter->rt_runtime_lock);
426
427                         if (!want)
428                                 break;
429                 }
430
431                 spin_lock(&rt_rq->rt_runtime_lock);
432                 /*
433                  * We cannot be left wanting - that would mean some runtime
434                  * leaked out of the system.
435                  */
436                 BUG_ON(want);
437 balanced:
438                 /*
439                  * Disable all the borrow logic by pretending we have inf
440                  * runtime - in which case borrowing doesn't make sense.
441                  */
442                 rt_rq->rt_runtime = RUNTIME_INF;
443                 spin_unlock(&rt_rq->rt_runtime_lock);
444                 spin_unlock(&rt_b->rt_runtime_lock);
445         }
446 }
447
448 static void disable_runtime(struct rq *rq)
449 {
450         unsigned long flags;
451
452         spin_lock_irqsave(&rq->lock, flags);
453         __disable_runtime(rq);
454         spin_unlock_irqrestore(&rq->lock, flags);
455 }
456
457 static void __enable_runtime(struct rq *rq)
458 {
459         struct rt_rq *rt_rq;
460
461         if (unlikely(!scheduler_running))
462                 return;
463
464         /*
465          * Reset each runqueue's bandwidth settings
466          */
467         for_each_leaf_rt_rq(rt_rq, rq) {
468                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
469
470                 spin_lock(&rt_b->rt_runtime_lock);
471                 spin_lock(&rt_rq->rt_runtime_lock);
472                 rt_rq->rt_runtime = rt_b->rt_runtime;
473                 rt_rq->rt_time = 0;
474                 rt_rq->rt_throttled = 0;
475                 spin_unlock(&rt_rq->rt_runtime_lock);
476                 spin_unlock(&rt_b->rt_runtime_lock);
477         }
478 }
479
480 static void enable_runtime(struct rq *rq)
481 {
482         unsigned long flags;
483
484         spin_lock_irqsave(&rq->lock, flags);
485         __enable_runtime(rq);
486         spin_unlock_irqrestore(&rq->lock, flags);
487 }
488
489 static int balance_runtime(struct rt_rq *rt_rq)
490 {
491         int more = 0;
492
493         if (rt_rq->rt_time > rt_rq->rt_runtime) {
494                 spin_unlock(&rt_rq->rt_runtime_lock);
495                 more = do_balance_runtime(rt_rq);
496                 spin_lock(&rt_rq->rt_runtime_lock);
497         }
498
499         return more;
500 }
501 #else /* !CONFIG_SMP */
502 static inline int balance_runtime(struct rt_rq *rt_rq)
503 {
504         return 0;
505 }
506 #endif /* CONFIG_SMP */
507
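    /*
     * Handle the expiry of 'overrun' rt periods: decay each rt_rq's
     * accumulated rt_time, unthrottle and re-enqueue runqueues that have
     * budget again, and return 1 only if every rt_rq was idle.
     */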
508 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
509 {
510         int i, idle = 1;
511         const struct cpumask *span;
512
513         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
514                 return 1;
515
516         span = sched_rt_period_mask();
517         for_each_cpu(i, span) {
518                 int enqueue = 0;
519                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
520                 struct rq *rq = rq_of_rt_rq(rt_rq);
521
522                 spin_lock(&rq->lock);
523                 if (rt_rq->rt_time) {
524                         u64 runtime;
525
526                         spin_lock(&rt_rq->rt_runtime_lock);
527                         if (rt_rq->rt_throttled)
528                                 balance_runtime(rt_rq);
529                         runtime = rt_rq->rt_runtime;
530                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
531                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
532                                 rt_rq->rt_throttled = 0;
533                                 enqueue = 1;
534                         }
535                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
536                                 idle = 0;
537                         spin_unlock(&rt_rq->rt_runtime_lock);
538                 } else if (rt_rq->rt_nr_running)
539                         idle = 0;
540
541                 if (enqueue)
542                         sched_rt_rq_enqueue(rt_rq);
543                 spin_unlock(&rq->lock);
544         }
545
546         return idle;
547 }
548
549 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
550 {
551 #ifdef CONFIG_RT_GROUP_SCHED
552         struct rt_rq *rt_rq = group_rt_rq(rt_se);
553
554         if (rt_rq)
555                 return rt_rq->highest_prio.curr;
556 #endif
557
558         return rt_task_of(rt_se)->prio;
559 }
560
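    /*
     * Check whether this rt_rq has consumed more than its allotted
     * runtime; try borrowing from other CPUs first, and if it is still
     * over budget mark it throttled and dequeue it.
     */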
561 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
562 {
563         u64 runtime = sched_rt_runtime(rt_rq);
564
565         if (rt_rq->rt_throttled)
566                 return rt_rq_throttled(rt_rq);
567
568         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
569                 return 0;
570
571         balance_runtime(rt_rq);
572         runtime = sched_rt_runtime(rt_rq);
573         if (runtime == RUNTIME_INF)
574                 return 0;
575
576         if (rt_rq->rt_time > runtime) {
577                 rt_rq->rt_throttled = 1;
578                 if (rt_rq_throttled(rt_rq)) {
579                         sched_rt_rq_dequeue(rt_rq);
580                         return 1;
581                 }
582         }
583
584         return 0;
585 }
586
587 /*
588  * Update the current task's runtime statistics. Skip current tasks that
589  * are not in our scheduling class.
590  */
591 static void update_curr_rt(struct rq *rq)
592 {
593         struct task_struct *curr = rq->curr;
594         struct sched_rt_entity *rt_se = &curr->rt;
595         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
596         u64 delta_exec;
597
598         if (!task_has_rt_policy(curr))
599                 return;
600
601         delta_exec = rq->clock - curr->se.exec_start;
602         if (unlikely((s64)delta_exec < 0))
603                 delta_exec = 0;
604
605         schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
606
607         curr->se.sum_exec_runtime += delta_exec;
608         account_group_exec_runtime(curr, delta_exec);
609
610         curr->se.exec_start = rq->clock;
611         cpuacct_charge(curr, delta_exec);
612
613         if (!rt_bandwidth_enabled())
614                 return;
615
616         for_each_sched_rt_entity(rt_se) {
617                 rt_rq = rt_rq_of_se(rt_se);
618
619                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
620                         spin_lock(&rt_rq->rt_runtime_lock);
621                         rt_rq->rt_time += delta_exec;
622                         if (sched_rt_runtime_exceeded(rt_rq))
623                                 resched_task(curr);
624                         spin_unlock(&rt_rq->rt_runtime_lock);
625                 }
626         }
627 }
628
629 #if defined CONFIG_SMP
630
631 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
632
633 static inline int next_prio(struct rq *rq)
634 {
635         struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
636
637         if (next && rt_prio(next->prio))
638                 return next->prio;
639         else
640                 return MAX_RT_PRIO;
641 }
642
643 static void
644 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
645 {
646         struct rq *rq = rq_of_rt_rq(rt_rq);
647
648         if (prio < prev_prio) {
649
650                 /*
651                  * If the new task is higher in priority than anything on the
652                  * run-queue, we know that the previous high becomes our
653                  * next-highest.
654                  */
655                 rt_rq->highest_prio.next = prev_prio;
656
657                 if (rq->online)
658                         cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
659
660         } else if (prio == rt_rq->highest_prio.curr)
661                 /*
662                  * If the next task is equal in priority to the highest on
663                  * the run-queue, then we implicitly know that the next highest
664                  * task cannot be any lower than current
665                  */
666                 rt_rq->highest_prio.next = prio;
667         else if (prio < rt_rq->highest_prio.next)
668                 /*
669                  * Otherwise, we need to recompute next-highest
670                  */
671                 rt_rq->highest_prio.next = next_prio(rq);
672 }
673
674 static void
675 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
676 {
677         struct rq *rq = rq_of_rt_rq(rt_rq);
678
679         if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
680                 rt_rq->highest_prio.next = next_prio(rq);
681
682         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
683                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
684 }
685
686 #else /* CONFIG_SMP */
687
688 static inline
689 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
690 static inline
691 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
692
693 #endif /* CONFIG_SMP */
694
695 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
696 static void
697 inc_rt_prio(struct rt_rq *rt_rq, int prio)
698 {
699         int prev_prio = rt_rq->highest_prio.curr;
700
701         if (prio < prev_prio)
702                 rt_rq->highest_prio.curr = prio;
703
704         inc_rt_prio_smp(rt_rq, prio, prev_prio);
705 }
706
707 static void
708 dec_rt_prio(struct rt_rq *rt_rq, int prio)
709 {
710         int prev_prio = rt_rq->highest_prio.curr;
711
712         if (rt_rq->rt_nr_running) {
713
714                 WARN_ON(prio < prev_prio);
715
716                 /*
717                  * This may have been our highest task, and therefore
718                  * we may have some recomputation to do
719                  */
720                 if (prio == prev_prio) {
721                         struct rt_prio_array *array = &rt_rq->active;
722
723                         rt_rq->highest_prio.curr =
724                                 sched_find_first_bit(array->bitmap);
725                 }
726
727         } else
728                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
729
730         dec_rt_prio_smp(rt_rq, prio, prev_prio);
731 }
732
733 #else
734
735 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
736 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
737
738 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
739
740 #ifdef CONFIG_RT_GROUP_SCHED
741
742 static void
743 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
744 {
745         if (rt_se_boosted(rt_se))
746                 rt_rq->rt_nr_boosted++;
747
748         if (rt_rq->tg)
749                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
750 }
751
752 static void
753 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
754 {
755         if (rt_se_boosted(rt_se))
756                 rt_rq->rt_nr_boosted--;
757
758         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
759 }
760
761 #else /* CONFIG_RT_GROUP_SCHED */
762
763 static void
764 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
765 {
766         start_rt_bandwidth(&def_rt_bandwidth);
767 }
768
769 static inline
770 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
771
772 #endif /* CONFIG_RT_GROUP_SCHED */
773
774 static inline
775 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
776 {
777         int prio = rt_se_prio(rt_se);
778
779         WARN_ON(!rt_prio(prio));
780         rt_rq->rt_nr_running++;
781
782         inc_rt_prio(rt_rq, prio);
783         inc_rt_migration(rt_se, rt_rq);
784         inc_rt_group(rt_se, rt_rq);
785 }
786
787 static inline
788 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
789 {
790         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
791         WARN_ON(!rt_rq->rt_nr_running);
792         rt_rq->rt_nr_running--;
793
794         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
795         dec_rt_migration(rt_se, rt_rq);
796         dec_rt_group(rt_se, rt_rq);
797 }
798
799 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
800 {
801         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
802         struct rt_prio_array *array = &rt_rq->active;
803         struct rt_rq *group_rq = group_rt_rq(rt_se);
804         struct list_head *queue = array->queue + rt_se_prio(rt_se);
805
806         /*
807          * Don't enqueue the group if it's throttled, or when empty.
808          * The latter is a consequence of the former when a child group
809          * gets throttled and the current group doesn't have any other
810          * active members.
811          */
812         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
813                 return;
814
815         list_add_tail(&rt_se->run_list, queue);
816         __set_bit(rt_se_prio(rt_se), array->bitmap);
817
818         inc_rt_tasks(rt_se, rt_rq);
819 }
820
821 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
822 {
823         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
824         struct rt_prio_array *array = &rt_rq->active;
825
826         list_del_init(&rt_se->run_list);
827         if (list_empty(array->queue + rt_se_prio(rt_se)))
828                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
829
830         dec_rt_tasks(rt_se, rt_rq);
831 }
832
833 /*
834  * Because the prio of an upper entry depends on the lower
835  * entries, we must remove entries top-down.
836  */
837 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
838 {
839         struct sched_rt_entity *back = NULL;
840
841         for_each_sched_rt_entity(rt_se) {
842                 rt_se->back = back;
843                 back = rt_se;
844         }
845
846         for (rt_se = back; rt_se; rt_se = rt_se->back) {
847                 if (on_rt_rq(rt_se))
848                         __dequeue_rt_entity(rt_se);
849         }
850 }
851
852 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
853 {
854         dequeue_rt_stack(rt_se);
855         for_each_sched_rt_entity(rt_se)
856                 __enqueue_rt_entity(rt_se);
857 }
858
859 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
860 {
861         dequeue_rt_stack(rt_se);
862
863         for_each_sched_rt_entity(rt_se) {
864                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
865
866                 if (rt_rq && rt_rq->rt_nr_running)
867                         __enqueue_rt_entity(rt_se);
868         }
869 }
870
871 /*
872  * Adding/removing a task to/from a priority array:
873  */
874 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
875 {
876         struct sched_rt_entity *rt_se = &p->rt;
877
878         if (wakeup)
879                 rt_se->timeout = 0;
880
881         enqueue_rt_entity(rt_se);
882
883         if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
884                 enqueue_pushable_task(rq, p);
885
886         inc_cpu_load(rq, p->se.load.weight);
887 }
888
889 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
890 {
891         struct sched_rt_entity *rt_se = &p->rt;
892
893         update_curr_rt(rq);
894         dequeue_rt_entity(rt_se);
895
896         dequeue_pushable_task(rq, p);
897
898         dec_cpu_load(rq, p->se.load.weight);
899 }
900
901 /*
902  * Put the task at the end of the run list without the overhead of dequeue
903  * followed by enqueue.
904  */
905 static void
906 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
907 {
908         if (on_rt_rq(rt_se)) {
909                 struct rt_prio_array *array = &rt_rq->active;
910                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
911
912                 if (head)
913                         list_move(&rt_se->run_list, queue);
914                 else
915                         list_move_tail(&rt_se->run_list, queue);
916         }
917 }
918
919 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
920 {
921         struct sched_rt_entity *rt_se = &p->rt;
922         struct rt_rq *rt_rq;
923
924         for_each_sched_rt_entity(rt_se) {
925                 rt_rq = rt_rq_of_se(rt_se);
926                 requeue_rt_entity(rt_rq, rt_se, head);
927         }
928 }
929
930 static void yield_task_rt(struct rq *rq)
931 {
932         requeue_task_rt(rq, rq->curr, 0);
933 }
934
935 #ifdef CONFIG_SMP
936 static int find_lowest_rq(struct task_struct *task);
937
938 static int select_task_rq_rt(struct task_struct *p, int sync)
939 {
940         struct rq *rq = task_rq(p);
941
942         /*
943          * If the current task is an RT task, then
944          * try to see if we can wake this RT task up on another
945          * runqueue. Otherwise simply start this RT task
946          * on its current runqueue.
947          *
948          * We want to avoid overloading runqueues, even if
949          * the RT task being woken is of higher priority than
950          * the current RT task. RT tasks behave differently than
951          * other tasks: if one gets preempted, we try to push it
952          * off to another queue. So trying to keep a preempting
953          * RT task on the same cache-hot CPU will force the
954          * running RT task to a cold CPU. We would waste all the
955          * cache for the lower-priority RT task in hopes of saving
956          * some for an RT task that is just being woken and
957          * probably will have cold cache anyway.
958          */
959         if (unlikely(rt_task(rq->curr)) &&
960             (p->rt.nr_cpus_allowed > 1)) {
961                 int cpu = find_lowest_rq(p);
962
963                 return (cpu == -1) ? task_cpu(p) : cpu;
964         }
965
966         /*
967          * Otherwise, just let it ride on the affined RQ and the
968          * post-schedule router will push the preempted task away
969          */
970         return task_cpu(p);
971 }
972
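    /*
     * Equal-priority wakeup: if current could run on some other cpu but
     * the woken task 'p' could not, put 'p' at the head of its queue and
     * reschedule so the push logic can move current away.
     */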
973 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
974 {
975         if (rq->curr->rt.nr_cpus_allowed == 1)
976                 return;
977
978         if (p->rt.nr_cpus_allowed != 1
979             && cpupri_find(&rq->rd->cpupri, p, NULL))
980                 return;
981
982         if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
983                 return;
984
985         /*
986          * There appear to be other cpus that can accept
987          * current and none to run 'p', so let's reschedule
988          * to try and push current away:
989          */
990         requeue_task_rt(rq, p, 1);
991         resched_task(rq->curr);
992 }
993
994 #endif /* CONFIG_SMP */
995
996 /*
997  * Preempt the current task with a newly woken task if needed:
998  */
999 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
1000 {
1001         if (p->prio < rq->curr->prio) {
1002                 resched_task(rq->curr);
1003                 return;
1004         }
1005
1006 #ifdef CONFIG_SMP
1007         /*
1008          * If:
1009          *
1010          * - the newly woken task is of equal priority to the current task
1011          * - the newly woken task is non-migratable while current is migratable
1012          * - current will be preempted on the next reschedule
1013          *
1014          * we should check to see if current can readily move to a different
1015          * cpu.  If so, we will reschedule to allow the push logic to try
1016          * to move current somewhere else, making room for our non-migratable
1017          * task.
1018          */
1019         if (p->prio == rq->curr->prio && !need_resched())
1020                 check_preempt_equal_prio(rq, p);
1021 #endif
1022 }
1023
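     /*
      * Take the first entity queued at the highest set priority of this
      * rt_rq's priority array.
      */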
1024 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1025                                                    struct rt_rq *rt_rq)
1026 {
1027         struct rt_prio_array *array = &rt_rq->active;
1028         struct sched_rt_entity *next = NULL;
1029         struct list_head *queue;
1030         int idx;
1031
1032         idx = sched_find_first_bit(array->bitmap);
1033         BUG_ON(idx >= MAX_RT_PRIO);
1034
1035         queue = array->queue + idx;
1036         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1037
1038         return next;
1039 }
1040
1041 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1042 {
1043         struct sched_rt_entity *rt_se;
1044         struct task_struct *p;
1045         struct rt_rq *rt_rq;
1046
1047         rt_rq = &rq->rt;
1048
1049         if (unlikely(!rt_rq->rt_nr_running))
1050                 return NULL;
1051
1052         if (rt_rq_throttled(rt_rq))
1053                 return NULL;
1054
1055         do {
1056                 rt_se = pick_next_rt_entity(rq, rt_rq);
1057                 BUG_ON(!rt_se);
1058                 rt_rq = group_rt_rq(rt_se);
1059         } while (rt_rq);
1060
1061         p = rt_task_of(rt_se);
1062         p->se.exec_start = rq->clock;
1063
1064         return p;
1065 }
1066
1067 static inline int has_pushable_tasks(struct rq *rq)
1068 {
1069         return !plist_head_empty(&rq->rt.pushable_tasks);
1070 }
1071
1072 static struct task_struct *pick_next_task_rt(struct rq *rq)
1073 {
1074         struct task_struct *p = _pick_next_task_rt(rq);
1075
1076         /* The running task is never eligible for pushing */
1077         if (p)
1078                 dequeue_pushable_task(rq, p);
1079
1080         /*
1081          * We detect this state here so that we can avoid taking the RQ
1082          * lock again later if there is no need to push
1083          */
1084         rq->post_schedule = has_pushable_tasks(rq);
1085
1086         return p;
1087 }
1088
1089 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1090 {
1091         update_curr_rt(rq);
1092         p->se.exec_start = 0;
1093
1094         /*
1095          * The previous task needs to be made eligible for pushing
1096          * if it is still active
1097          */
1098         if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
1099                 enqueue_pushable_task(rq, p);
1100 }
1101
1102 #ifdef CONFIG_SMP
1103
1104 /* Only try algorithms three times */
1105 #define RT_MAX_TRIES 3
1106
1107 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
1108
1109 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1110 {
1111         if (!task_running(rq, p) &&
1112             (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
1113             (p->rt.nr_cpus_allowed > 1))
1114                 return 1;
1115         return 0;
1116 }
1117
1118 /* Return the second highest RT task, NULL otherwise */
1119 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1120 {
1121         struct task_struct *next = NULL;
1122         struct sched_rt_entity *rt_se;
1123         struct rt_prio_array *array;
1124         struct rt_rq *rt_rq;
1125         int idx;
1126
1127         for_each_leaf_rt_rq(rt_rq, rq) {
1128                 array = &rt_rq->active;
1129                 idx = sched_find_first_bit(array->bitmap);
1130  next_idx:
1131                 if (idx >= MAX_RT_PRIO)
1132                         continue;
1133                 if (next && next->prio < idx)
1134                         continue;
1135                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
1136                         struct task_struct *p = rt_task_of(rt_se);
1137                         if (pick_rt_task(rq, p, cpu)) {
1138                                 next = p;
1139                                 break;
1140                         }
1141                 }
1142                 if (!next) {
1143                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1144                         goto next_idx;
1145                 }
1146         }
1147
1148         return next;
1149 }
1150
1151 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1152
1153 static inline int pick_optimal_cpu(int this_cpu,
1154                                    const struct cpumask *mask)
1155 {
1156         int first;
1157
1158         /* "this_cpu" is cheaper to preempt than a remote processor */
1159         if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
1160                 return this_cpu;
1161
1162         first = cpumask_first(mask);
1163         if (first < nr_cpu_ids)
1164                 return first;
1165
1166         return -1;
1167 }
1168
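     /*
      * Pick a cpu from the lowest-priority mask reported by cpupri:
      * prefer the task's current cpu (cache-hot), then cpus close by in
      * the wake-affine sched domains, and finally any compatible cpu;
      * returns -1 if the task cannot go anywhere else.
      */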
1169 static int find_lowest_rq(struct task_struct *task)
1170 {
1171         struct sched_domain *sd;
1172         struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1173         int this_cpu = smp_processor_id();
1174         int cpu      = task_cpu(task);
1175         cpumask_var_t domain_mask;
1176
1177         if (task->rt.nr_cpus_allowed == 1)
1178                 return -1; /* No other targets possible */
1179
1180         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1181                 return -1; /* No targets found */
1182
1183         /*
1184          * At this point we have built a mask of cpus representing the
1185          * lowest priority tasks in the system.  Now we want to elect
1186          * the best one based on our affinity and topology.
1187          *
1188          * We prioritize the last cpu that the task executed on since
1189          * it is most likely cache-hot in that location.
1190          */
1191         if (cpumask_test_cpu(cpu, lowest_mask))
1192                 return cpu;
1193
1194         /*
1195          * Otherwise, we consult the sched_domains span maps to figure
1196          * out which cpu is logically closest to our hot cache data.
1197          */
1198         if (this_cpu == cpu)
1199                 this_cpu = -1; /* Skip this_cpu opt if the same */
1200
1201         if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
1202                 for_each_domain(cpu, sd) {
1203                         if (sd->flags & SD_WAKE_AFFINE) {
1204                                 int best_cpu;
1205
1206                                 cpumask_and(domain_mask,
1207                                             sched_domain_span(sd),
1208                                             lowest_mask);
1209
1210                                 best_cpu = pick_optimal_cpu(this_cpu,
1211                                                             domain_mask);
1212
1213                                 if (best_cpu != -1) {
1214                                         free_cpumask_var(domain_mask);
1215                                         return best_cpu;
1216                                 }
1217                         }
1218                 }
1219                 free_cpumask_var(domain_mask);
1220         }
1221
1222         /*
1223          * And finally, if there were no matches within the domains
1224          * just give the caller *something* to work with from the compatible
1225          * locations.
1226          */
1227         return pick_optimal_cpu(this_cpu, lowest_mask);
1228 }
1229
1230 /* Will lock the rq it finds */
1231 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1232 {
1233         struct rq *lowest_rq = NULL;
1234         int tries;
1235         int cpu;
1236
1237         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1238                 cpu = find_lowest_rq(task);
1239
1240                 if ((cpu == -1) || (cpu == rq->cpu))
1241                         break;
1242
1243                 lowest_rq = cpu_rq(cpu);
1244
1245                 /* if the prio of this runqueue changed, try again */
1246                 if (double_lock_balance(rq, lowest_rq)) {
1247                         /*
1248                          * We had to unlock the run queue. In
1249                          * the meantime, the task could have
1250                          * migrated already or had its affinity changed.
1251                          * Also make sure that it wasn't scheduled on its rq.
1252                          */
1253                         if (unlikely(task_rq(task) != rq ||
1254                                      !cpumask_test_cpu(lowest_rq->cpu,
1255                                                        &task->cpus_allowed) ||
1256                                      task_running(rq, task) ||
1257                                      !task->se.on_rq)) {
1258
1259                                 spin_unlock(&lowest_rq->lock);
1260                                 lowest_rq = NULL;
1261                                 break;
1262                         }
1263                 }
1264
1265                 /* If this rq is still suitable use it. */
1266                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1267                         break;
1268
1269                 /* try again */
1270                 double_unlock_balance(rq, lowest_rq);
1271                 lowest_rq = NULL;
1272         }
1273
1274         return lowest_rq;
1275 }
1276
1277 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1278 {
1279         struct task_struct *p;
1280
1281         if (!has_pushable_tasks(rq))
1282                 return NULL;
1283
1284         p = plist_first_entry(&rq->rt.pushable_tasks,
1285                               struct task_struct, pushable_tasks);
1286
1287         BUG_ON(rq->cpu != task_cpu(p));
1288         BUG_ON(task_current(rq, p));
1289         BUG_ON(p->rt.nr_cpus_allowed <= 1);
1290
1291         BUG_ON(!p->se.on_rq);
1292         BUG_ON(!rt_task(p));
1293
1294         return p;
1295 }
1296
1297 /*
1298  * If the current CPU has more than one RT task, see if the non-
1299  * running task can migrate over to a CPU that is running a task
1300  * of lesser priority.
1301  */
1302 static int push_rt_task(struct rq *rq)
1303 {
1304         struct task_struct *next_task;
1305         struct rq *lowest_rq;
1306
1307         if (!rq->rt.overloaded)
1308                 return 0;
1309
1310         next_task = pick_next_pushable_task(rq);
1311         if (!next_task)
1312                 return 0;
1313
1314  retry:
1315         if (unlikely(next_task == rq->curr)) {
1316                 WARN_ON(1);
1317                 return 0;
1318         }
1319
1320         /*
1321          * It's possible that the next_task slipped in with a
1322          * higher priority than current. If that's the case
1323          * just reschedule current.
1324          */
1325         if (unlikely(next_task->prio < rq->curr->prio)) {
1326                 resched_task(rq->curr);
1327                 return 0;
1328         }
1329
1330         /* We might release rq lock */
1331         get_task_struct(next_task);
1332
1333         /* find_lock_lowest_rq locks the rq if found */
1334         lowest_rq = find_lock_lowest_rq(next_task, rq);
1335         if (!lowest_rq) {
1336                 struct task_struct *task;
1337                 /*
1338                  * find_lock_lowest_rq releases rq->lock
1339                  * so it is possible that next_task has migrated.
1340                  *
1341                  * We need to make sure that the task is still on the same
1342                  * run-queue and is also still the next task eligible for
1343                  * pushing.
1344                  */
1345                 task = pick_next_pushable_task(rq);
1346                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1347                         /*
1348                          * If we get here, the task hasn't moved at all, but
1349                          * it has failed to push.  We will not try again,
1350                          * since the other cpus will pull from us when they
1351                          * are ready.
1352                          */
1353                         dequeue_pushable_task(rq, next_task);
1354                         goto out;
1355                 }
1356
1357                 if (!task)
1358                         /* No more tasks, just exit */
1359                         goto out;
1360
1361                 /*
1362                  * Something has shifted, try again.
1363                  */
1364                 put_task_struct(next_task);
1365                 next_task = task;
1366                 goto retry;
1367         }
1368
1369         deactivate_task(rq, next_task, 0);
1370         set_task_cpu(next_task, lowest_rq->cpu);
1371         activate_task(lowest_rq, next_task, 0);
1372
1373         resched_task(lowest_rq->curr);
1374
1375         double_unlock_balance(rq, lowest_rq);
1376
1377 out:
1378         put_task_struct(next_task);
1379
1380         return 1;
1381 }
1382
1383 static void push_rt_tasks(struct rq *rq)
1384 {
1385         /* push_rt_task will return true if it moved an RT task */
1386         while (push_rt_task(rq))
1387                 ;
1388 }
1389
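     /*
      * Scan the overloaded runqueues in our root domain and pull over
      * any RT task that would preempt what this_rq is about to run;
      * returns 1 if at least one task was pulled.
      */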
1390 static int pull_rt_task(struct rq *this_rq)
1391 {
1392         int this_cpu = this_rq->cpu, ret = 0, cpu;
1393         struct task_struct *p;
1394         struct rq *src_rq;
1395
1396         if (likely(!rt_overloaded(this_rq)))
1397                 return 0;
1398
1399         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1400                 if (this_cpu == cpu)
1401                         continue;
1402
1403                 src_rq = cpu_rq(cpu);
1404
1405                 /*
1406                  * Don't bother taking the src_rq->lock if the next highest
1407                  * task is known to be lower-priority than our current task.
1408                  * This may look racy, but if this value is about to go
1409                  * logically higher, the src_rq will push this task away.
1410                  * And if it's going logically lower, we do not care
1411                  */
1412                 if (src_rq->rt.highest_prio.next >=
1413                     this_rq->rt.highest_prio.curr)
1414                         continue;
1415
1416                 /*
1417                  * We can potentially drop this_rq's lock in
1418                  * double_lock_balance, and another CPU could
1419                  * alter this_rq
1420                  */
1421                 double_lock_balance(this_rq, src_rq);
1422
1423                 /*
1424                  * Are there still pullable RT tasks?
1425                  */
1426                 if (src_rq->rt.rt_nr_running <= 1)
1427                         goto skip;
1428
1429                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1430
1431                 /*
1432                  * Do we have an RT task that preempts
1433                  * the to-be-scheduled task?
1434                  */
1435                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1436                         WARN_ON(p == src_rq->curr);
1437                         WARN_ON(!p->se.on_rq);
1438
1439                         /*
1440                          * There's a chance that p is higher in priority
1441                          * than what's currently running on its cpu.
1442                          * This is just because p is waking up and hasn't
1443                          * had a chance to schedule. We only pull
1444                          * p if it is lower in priority than the
1445                          * current task on the run queue
1446                          */
1447                         if (p->prio < src_rq->curr->prio)
1448                                 goto skip;
1449
1450                         ret = 1;
1451
1452                         deactivate_task(src_rq, p, 0);
1453                         set_task_cpu(p, this_cpu);
1454                         activate_task(this_rq, p, 0);
1455                         /*
1456                          * We continue with the search, just in
1457                          * case there's an even higher prio task
1458                          * in another runqueue. (low likelihood
1459                          * but possible)
1460                          */
1461                 }
1462  skip:
1463                 double_unlock_balance(this_rq, src_rq);
1464         }
1465
1466         return ret;
1467 }
1468
1469 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1470 {
1471         /* Try to pull RT tasks here if we lower this rq's prio */
1472         if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
1473                 pull_rt_task(rq);
1474 }
1475
1476 static void post_schedule_rt(struct rq *rq)
1477 {
1478         push_rt_tasks(rq);
1479 }
1480
1481 /*
1482  * If we are not running and we are not going to reschedule soon, we should
1483  * try to push tasks away now
1484  */
1485 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1486 {
1487         if (!task_running(rq, p) &&
1488             !test_tsk_need_resched(rq->curr) &&
1489             has_pushable_tasks(rq) &&
1490             p->rt.nr_cpus_allowed > 1)
1491                 push_rt_tasks(rq);
1492 }
1493
1494 static unsigned long
1495 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1496                 unsigned long max_load_move,
1497                 struct sched_domain *sd, enum cpu_idle_type idle,
1498                 int *all_pinned, int *this_best_prio)
1499 {
1500         /* don't touch RT tasks */
1501         return 0;
1502 }
1503
1504 static int
1505 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1506                  struct sched_domain *sd, enum cpu_idle_type idle)
1507 {
1508         /* don't touch RT tasks */
1509         return 0;
1510 }
1511
1512 static void set_cpus_allowed_rt(struct task_struct *p,
1513                                 const struct cpumask *new_mask)
1514 {
1515         int weight = cpumask_weight(new_mask);
1516
1517         BUG_ON(!rt_task(p));
1518
1519         /*
1520          * Update the migration status of the RQ if we have an RT task
1521          * which is running AND changing its weight value.
1522          */
1523         if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1524                 struct rq *rq = task_rq(p);
1525
1526                 if (!task_current(rq, p)) {
1527                         /*
1528                          * Make sure we dequeue this task from the pushable list
1529                          * before going further.  It will either remain off of
1530                          * the list because we are no longer pushable, or it
1531                          * will be requeued.
1532                          */
1533                         if (p->rt.nr_cpus_allowed > 1)
1534                                 dequeue_pushable_task(rq, p);
1535
1536                         /*
1537                          * Requeue if our weight is changing and still > 1
1538                          */
1539                         if (weight > 1)
1540                                 enqueue_pushable_task(rq, p);
1541
1542                 }
1543
1544                 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1545                         rq->rt.rt_nr_migratory++;
1546                 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1547                         BUG_ON(!rq->rt.rt_nr_migratory);
1548                         rq->rt.rt_nr_migratory--;
1549                 }
1550
1551                 update_rt_migration(&rq->rt);
1552         }
1553
1554         cpumask_copy(&p->cpus_allowed, new_mask);
1555         p->rt.nr_cpus_allowed = weight;
1556 }
1557
1558 /* Assumes rq->lock is held */
1559 static void rq_online_rt(struct rq *rq)
1560 {
1561         if (rq->rt.overloaded)
1562                 rt_set_overload(rq);
1563
1564         __enable_runtime(rq);
1565
1566         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1567 }
1568
1569 /* Assumes rq->lock is held */
1570 static void rq_offline_rt(struct rq *rq)
1571 {
1572         if (rq->rt.overloaded)
1573                 rt_clear_overload(rq);
1574
1575         __disable_runtime(rq);
1576
1577         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1578 }
1579
1580 /*
1581  * When switching from the rt queue, we bring ourselves to a position
1582  * that we might want to pull RT tasks from other runqueues.
1583  */
1584 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1585                            int running)
1586 {
1587         /*
1588          * If there are other RT tasks then we will reschedule
1589          * and the scheduling of the other RT tasks will handle
1590          * the balancing. But if we are the last RT task
1591          * we may need to handle the pulling of RT tasks
1592          * now.
1593          */
1594         if (!rq->rt.rt_nr_running)
1595                 pull_rt_task(rq);
1596 }
1597
1598 static inline void init_sched_rt_class(void)
1599 {
1600         unsigned int i;
1601
1602         for_each_possible_cpu(i)
1603                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1604                                         GFP_KERNEL, cpu_to_node(i));
1605 }
1606 #endif /* CONFIG_SMP */
1607
1608 /*
1609  * When switching a task to RT, we may overload the runqueue
1610  * with RT tasks. In this case we try to push them off to
1611  * other runqueues.
1612  */
1613 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1614                            int running)
1615 {
1616         int check_resched = 1;
1617
1618         /*
1619          * If we are already running, then there's nothing
1620          * that needs to be done. But if we are not running
1621          * we may need to preempt the current running task.
1622          * If that current running task is also an RT task
1623          * then see if we can move to another run queue.
1624          */
1625         if (!running) {
1626 #ifdef CONFIG_SMP
1627                 if (rq->rt.overloaded && push_rt_task(rq) &&
1628                     /* Don't resched if we changed runqueues */
1629                     rq != task_rq(p))
1630                         check_resched = 0;
1631 #endif /* CONFIG_SMP */
1632                 if (check_resched && p->prio < rq->curr->prio)
1633                         resched_task(rq->curr);
1634         }
1635 }
1636
1637 /*
1638  * Priority of the task has changed. This may cause
1639  * us to initiate a push or pull.
1640  */
1641 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1642                             int oldprio, int running)
1643 {
1644         if (running) {
1645 #ifdef CONFIG_SMP
1646                 /*
1647                  * If our priority decreases while running, we
1648                  * may need to pull tasks to this runqueue.
1649                  */
1650                 if (oldprio < p->prio)
1651                         pull_rt_task(rq);
1652                 /*
1653                  * If there's a higher priority task waiting to run
1654                  * then reschedule. Note, the above pull_rt_task
1655                  * can release the rq lock and p could migrate.
1656                  * Only reschedule if p is still on the same runqueue.
1657                  */
1658                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1659                         resched_task(p);
1660 #else
1661                 /* For UP simply resched on drop of prio */
1662                 if (oldprio < p->prio)
1663                         resched_task(p);
1664 #endif /* CONFIG_SMP */
1665         } else {
1666                 /*
1667                  * This task is not running, but if it is
1668                  * higher in priority than the current running task
1669                  * then reschedule.
1670                  */
1671                 if (p->prio < rq->curr->prio)
1672                         resched_task(rq->curr);
1673         }
1674 }
1675
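     /*
      * RLIMIT_RTTIME enforcement: count the ticks this RT task has been
      * running and, once past the soft limit, set cputime_expires.sched_exp
      * so the limit can be acted upon.
      */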
1676 static void watchdog(struct rq *rq, struct task_struct *p)
1677 {
1678         unsigned long soft, hard;
1679
1680         if (!p->signal)
1681                 return;
1682
1683         soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1684         hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1685
1686         if (soft != RLIM_INFINITY) {
1687                 unsigned long next;
1688
1689                 p->rt.timeout++;
1690                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1691                 if (p->rt.timeout > next)
1692                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1693         }
1694 }
1695
1696 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1697 {
1698         update_curr_rt(rq);
1699
1700         watchdog(rq, p);
1701
1702         /*
1703          * RR tasks need a special form of timeslice management.
1704          * FIFO tasks have no timeslices.
1705          */
1706         if (p->policy != SCHED_RR)
1707                 return;
1708
1709         if (--p->rt.time_slice)
1710                 return;
1711
1712         p->rt.time_slice = DEF_TIMESLICE;
1713
1714         /*
1715          * Requeue to the end of queue if we are not the only element
1716          * on the queue:
1717          */
1718         if (p->rt.run_list.prev != p->rt.run_list.next) {
1719                 requeue_task_rt(rq, p, 0);
1720                 set_tsk_need_resched(p);
1721         }
1722 }
1723
1724 static void set_curr_task_rt(struct rq *rq)
1725 {
1726         struct task_struct *p = rq->curr;
1727
1728         p->se.exec_start = rq->clock;
1729
1730         /* The running task is never eligible for pushing */
1731         dequeue_pushable_task(rq, p);
1732 }
1733
1734 static const struct sched_class rt_sched_class = {
1735         .next                   = &fair_sched_class,
1736         .enqueue_task           = enqueue_task_rt,
1737         .dequeue_task           = dequeue_task_rt,
1738         .yield_task             = yield_task_rt,
1739
1740         .check_preempt_curr     = check_preempt_curr_rt,
1741
1742         .pick_next_task         = pick_next_task_rt,
1743         .put_prev_task          = put_prev_task_rt,
1744
1745 #ifdef CONFIG_SMP
1746         .select_task_rq         = select_task_rq_rt,
1747
1748         .load_balance           = load_balance_rt,
1749         .move_one_task          = move_one_task_rt,
1750         .set_cpus_allowed       = set_cpus_allowed_rt,
1751         .rq_online              = rq_online_rt,
1752         .rq_offline             = rq_offline_rt,
1753         .pre_schedule           = pre_schedule_rt,
1754         .post_schedule          = post_schedule_rt,
1755         .task_wake_up           = task_wake_up_rt,
1756         .switched_from          = switched_from_rt,
1757 #endif
1758
1759         .set_curr_task          = set_curr_task_rt,
1760         .task_tick              = task_tick_rt,
1761
1762         .prio_changed           = prio_changed_rt,
1763         .switched_to            = switched_to_rt,
1764 };
1765
1766 #ifdef CONFIG_SCHED_DEBUG
1767 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1768
1769 static void print_rt_stats(struct seq_file *m, int cpu)
1770 {
1771         struct rt_rq *rt_rq;
1772
1773         rcu_read_lock();
1774         for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1775                 print_rt_rq(m, cpu, rt_rq);
1776         rcu_read_unlock();
1777 }
1778 #endif /* CONFIG_SCHED_DEBUG */
1779