sched: Fix cpupri build on !CONFIG_SMP
kernel/sched_rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #ifdef CONFIG_RT_GROUP_SCHED
7
8 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
9
10 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
11 {
12 #ifdef CONFIG_SCHED_DEBUG
13         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
14 #endif
15         return container_of(rt_se, struct task_struct, rt);
16 }
17
18 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
19 {
20         return rt_rq->rq;
21 }
22
23 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
24 {
25         return rt_se->rt_rq;
26 }
27
28 #else /* CONFIG_RT_GROUP_SCHED */
29
30 #define rt_entity_is_task(rt_se) (1)
31
32 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
33 {
34         return container_of(rt_se, struct task_struct, rt);
35 }
36
37 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
38 {
39         return container_of(rt_rq, struct rq, rt);
40 }
41
42 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
43 {
44         struct task_struct *p = rt_task_of(rt_se);
45         struct rq *rq = task_rq(p);
46
47         return &rq->rt;
48 }
49
50 #endif /* CONFIG_RT_GROUP_SCHED */
51
52 #ifdef CONFIG_SMP
53
54 static inline int rt_overloaded(struct rq *rq)
55 {
56         return atomic_read(&rq->rd->rto_count);
57 }
58
59 static inline void rt_set_overload(struct rq *rq)
60 {
61         if (!rq->online)
62                 return;
63
64         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
65         /*
66          * Make sure the mask is visible before we set
67          * the overload count. That is checked to determine
68          * if we should look at the mask. It would be a shame
69          * if we looked at the mask, but the mask was not
70          * updated yet.
71          */
72         wmb();
73         atomic_inc(&rq->rd->rto_count);
74 }
75
76 static inline void rt_clear_overload(struct rq *rq)
77 {
78         if (!rq->online)
79                 return;
80
81         /* the order here really doesn't matter */
82         atomic_dec(&rq->rd->rto_count);
83         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
84 }
85
86 static void update_rt_migration(struct rt_rq *rt_rq)
87 {
88         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
89                 if (!rt_rq->overloaded) {
90                         rt_set_overload(rq_of_rt_rq(rt_rq));
91                         rt_rq->overloaded = 1;
92                 }
93         } else if (rt_rq->overloaded) {
94                 rt_clear_overload(rq_of_rt_rq(rt_rq));
95                 rt_rq->overloaded = 0;
96         }
97 }
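/*
 * Illustrative example of the overload tracking above: with two RT tasks
 * queued on this rq (rt_nr_total == 2) and at least one of them allowed to
 * run on more than one CPU (rt_nr_migratory >= 1), the rq is flagged in
 * rd->rto_mask so other CPUs know they may be able to pull work from it.
 * As soon as either condition stops holding, the flag is cleared again.
 */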
98
99 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
100 {
101         if (!rt_entity_is_task(rt_se))
102                 return;
103
104         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
105
106         rt_rq->rt_nr_total++;
107         if (rt_se->nr_cpus_allowed > 1)
108                 rt_rq->rt_nr_migratory++;
109
110         update_rt_migration(rt_rq);
111 }
112
113 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
114 {
115         if (!rt_entity_is_task(rt_se))
116                 return;
117
118         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
119
120         rt_rq->rt_nr_total--;
121         if (rt_se->nr_cpus_allowed > 1)
122                 rt_rq->rt_nr_migratory--;
123
124         update_rt_migration(rt_rq);
125 }
126
127 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
128 {
129         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
130         plist_node_init(&p->pushable_tasks, p->prio);
131         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
132 }
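/*
 * Note on the del/init/add sequence above: pushable_tasks is a
 * priority-sorted plist keyed on p->prio.  Re-initialising the node with
 * the task's current priority before re-adding it keeps the list sorted
 * even when the task was already queued at an older priority.
 */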
133
134 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
135 {
136         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
137 }
138
139 static inline int has_pushable_tasks(struct rq *rq)
140 {
141         return !plist_head_empty(&rq->rt.pushable_tasks);
142 }
143
144 #else
145
146 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
147 {
148 }
149
150 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
151 {
152 }
153
154 static inline
155 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
156 {
157 }
158
159 static inline
160 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
161 {
162 }
163
164 #endif /* CONFIG_SMP */
165
166 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
167 {
168         return !list_empty(&rt_se->run_list);
169 }
170
171 #ifdef CONFIG_RT_GROUP_SCHED
172
173 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
174 {
175         if (!rt_rq->tg)
176                 return RUNTIME_INF;
177
178         return rt_rq->rt_runtime;
179 }
180
181 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
182 {
183         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
184 }
185
186 #define for_each_leaf_rt_rq(rt_rq, rq) \
187         list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
188
189 #define for_each_sched_rt_entity(rt_se) \
190         for (; rt_se; rt_se = rt_se->parent)
191
192 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
193 {
194         return rt_se->my_q;
195 }
196
197 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
198 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
199
200 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
201 {
202         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
203         struct sched_rt_entity *rt_se = rt_rq->rt_se;
204
205         if (rt_rq->rt_nr_running) {
206                 if (rt_se && !on_rt_rq(rt_se))
207                         enqueue_rt_entity(rt_se);
208                 if (rt_rq->highest_prio.curr < curr->prio)
209                         resched_task(curr);
210         }
211 }
212
213 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
214 {
215         struct sched_rt_entity *rt_se = rt_rq->rt_se;
216
217         if (rt_se && on_rt_rq(rt_se))
218                 dequeue_rt_entity(rt_se);
219 }
220
221 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
222 {
223         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
224 }
225
226 static int rt_se_boosted(struct sched_rt_entity *rt_se)
227 {
228         struct rt_rq *rt_rq = group_rt_rq(rt_se);
229         struct task_struct *p;
230
231         if (rt_rq)
232                 return !!rt_rq->rt_nr_boosted;
233
234         p = rt_task_of(rt_se);
235         return p->prio != p->normal_prio;
236 }
237
238 #ifdef CONFIG_SMP
239 static inline const struct cpumask *sched_rt_period_mask(void)
240 {
241         return cpu_rq(smp_processor_id())->rd->span;
242 }
243 #else
244 static inline const struct cpumask *sched_rt_period_mask(void)
245 {
246         return cpu_online_mask;
247 }
248 #endif
249
250 static inline
251 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
252 {
253         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
254 }
255
256 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
257 {
258         return &rt_rq->tg->rt_bandwidth;
259 }
260
261 #else /* !CONFIG_RT_GROUP_SCHED */
262
263 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
264 {
265         return rt_rq->rt_runtime;
266 }
267
268 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
269 {
270         return ktime_to_ns(def_rt_bandwidth.rt_period);
271 }
272
273 #define for_each_leaf_rt_rq(rt_rq, rq) \
274         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
275
276 #define for_each_sched_rt_entity(rt_se) \
277         for (; rt_se; rt_se = NULL)
278
279 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
280 {
281         return NULL;
282 }
283
284 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
285 {
286         if (rt_rq->rt_nr_running)
287                 resched_task(rq_of_rt_rq(rt_rq)->curr);
288 }
289
290 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
291 {
292 }
293
294 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
295 {
296         return rt_rq->rt_throttled;
297 }
298
299 static inline const struct cpumask *sched_rt_period_mask(void)
300 {
301         return cpu_online_mask;
302 }
303
304 static inline
305 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
306 {
307         return &cpu_rq(cpu)->rt;
308 }
309
310 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
311 {
312         return &def_rt_bandwidth;
313 }
314
315 #endif /* CONFIG_RT_GROUP_SCHED */
316
317 #ifdef CONFIG_SMP
318 /*
319  * We ran out of runtime, see if we can borrow some from our neighbours.
320  */
321 static int do_balance_runtime(struct rt_rq *rt_rq)
322 {
323         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
324         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
325         int i, weight, more = 0;
326         u64 rt_period;
327
328         weight = cpumask_weight(rd->span);
329
330         spin_lock(&rt_b->rt_runtime_lock);
331         rt_period = ktime_to_ns(rt_b->rt_period);
332         for_each_cpu(i, rd->span) {
333                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
334                 s64 diff;
335
336                 if (iter == rt_rq)
337                         continue;
338
339                 spin_lock(&iter->rt_runtime_lock);
340                 /*
341                  * Either all rqs have inf runtime and there's nothing to steal
342                  * or __disable_runtime() below sets a specific rq to inf to
343                  * indicate it's been disabled and disallow stealing.
344                  */
345                 if (iter->rt_runtime == RUNTIME_INF)
346                         goto next;
347
348                 /*
349                  * From runqueues with spare time, take 1/n part of their
350                  * spare time, but no more than our period.
351                  */
352                 diff = iter->rt_runtime - iter->rt_time;
353                 if (diff > 0) {
354                         diff = div_u64((u64)diff, weight);
355                         if (rt_rq->rt_runtime + diff > rt_period)
356                                 diff = rt_period - rt_rq->rt_runtime;
357                         iter->rt_runtime -= diff;
358                         rt_rq->rt_runtime += diff;
359                         more = 1;
360                         if (rt_rq->rt_runtime == rt_period) {
361                                 spin_unlock(&iter->rt_runtime_lock);
362                                 break;
363                         }
364                 }
365 next:
366                 spin_unlock(&iter->rt_runtime_lock);
367         }
368         spin_unlock(&rt_b->rt_runtime_lock);
369
370         return more;
371 }
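/*
 * Worked example of the borrowing above (illustrative numbers): on a
 * 4-CPU root domain (weight == 4), a neighbour with 300us of spare
 * runtime (rt_runtime - rt_time) gives up 300/4 == 75us, which is added
 * to our rt_runtime.  The transfer is clamped so that our rt_runtime
 * never exceeds rt_period, at which point the scan stops early.
 */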
372
373 /*
374  * Ensure this RQ takes back all the runtime it lent to its neighbours.
375  */
376 static void __disable_runtime(struct rq *rq)
377 {
378         struct root_domain *rd = rq->rd;
379         struct rt_rq *rt_rq;
380
381         if (unlikely(!scheduler_running))
382                 return;
383
384         for_each_leaf_rt_rq(rt_rq, rq) {
385                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
386                 s64 want;
387                 int i;
388
389                 spin_lock(&rt_b->rt_runtime_lock);
390                 spin_lock(&rt_rq->rt_runtime_lock);
391                 /*
392                  * Either we're all inf and nobody needs to borrow, or we're
393                  * already disabled and thus have nothing to do, or we have
394                  * exactly the right amount of runtime to take out.
395                  */
396                 if (rt_rq->rt_runtime == RUNTIME_INF ||
397                                 rt_rq->rt_runtime == rt_b->rt_runtime)
398                         goto balanced;
399                 spin_unlock(&rt_rq->rt_runtime_lock);
400
401                 /*
402                  * Calculate the difference between what we started out with
403                  * and what we currently have; that's the amount of runtime
404                  * we lent out and now have to reclaim.
405                  */
406                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
407
408                 /*
409                  * Greedy reclaim, take back as much as we can.
410                  */
411                 for_each_cpu(i, rd->span) {
412                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
413                         s64 diff;
414
415                         /*
416                          * Can't reclaim from ourselves or disabled runqueues.
417                          */
418                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
419                                 continue;
420
421                         spin_lock(&iter->rt_runtime_lock);
422                         if (want > 0) {
423                                 diff = min_t(s64, iter->rt_runtime, want);
424                                 iter->rt_runtime -= diff;
425                                 want -= diff;
426                         } else {
427                                 iter->rt_runtime -= want;
428                                 want -= want;
429                         }
430                         spin_unlock(&iter->rt_runtime_lock);
431
432                         if (!want)
433                                 break;
434                 }
435
436                 spin_lock(&rt_rq->rt_runtime_lock);
437                 /*
438                  * We cannot be left wanting - that would mean some runtime
439                  * leaked out of the system.
440                  */
441                 BUG_ON(want);
442 balanced:
443                 /*
444                  * Disable all the borrow logic by pretending we have inf
445                  * runtime - in which case borrowing doesn't make sense.
446                  */
447                 rt_rq->rt_runtime = RUNTIME_INF;
448                 spin_unlock(&rt_rq->rt_runtime_lock);
449                 spin_unlock(&rt_b->rt_runtime_lock);
450         }
451 }
452
453 static void disable_runtime(struct rq *rq)
454 {
455         unsigned long flags;
456
457         spin_lock_irqsave(&rq->lock, flags);
458         __disable_runtime(rq);
459         spin_unlock_irqrestore(&rq->lock, flags);
460 }
461
462 static void __enable_runtime(struct rq *rq)
463 {
464         struct rt_rq *rt_rq;
465
466         if (unlikely(!scheduler_running))
467                 return;
468
469         /*
470          * Reset each runqueue's bandwidth settings
471          */
472         for_each_leaf_rt_rq(rt_rq, rq) {
473                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
474
475                 spin_lock(&rt_b->rt_runtime_lock);
476                 spin_lock(&rt_rq->rt_runtime_lock);
477                 rt_rq->rt_runtime = rt_b->rt_runtime;
478                 rt_rq->rt_time = 0;
479                 rt_rq->rt_throttled = 0;
480                 spin_unlock(&rt_rq->rt_runtime_lock);
481                 spin_unlock(&rt_b->rt_runtime_lock);
482         }
483 }
484
485 static void enable_runtime(struct rq *rq)
486 {
487         unsigned long flags;
488
489         spin_lock_irqsave(&rq->lock, flags);
490         __enable_runtime(rq);
491         spin_unlock_irqrestore(&rq->lock, flags);
492 }
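/*
 * The enable/disable pair above is used from the rq online/offline
 * callbacks further down: rq_offline_rt() calls __disable_runtime() so a
 * departing rq hands back any runtime it borrowed, and rq_online_rt()
 * calls __enable_runtime() to reset the rq to its configured bandwidth.
 */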
493
494 static int balance_runtime(struct rt_rq *rt_rq)
495 {
496         int more = 0;
497
498         if (rt_rq->rt_time > rt_rq->rt_runtime) {
499                 spin_unlock(&rt_rq->rt_runtime_lock);
500                 more = do_balance_runtime(rt_rq);
501                 spin_lock(&rt_rq->rt_runtime_lock);
502         }
503
504         return more;
505 }
506 #else /* !CONFIG_SMP */
507 static inline int balance_runtime(struct rt_rq *rt_rq)
508 {
509         return 0;
510 }
511 #endif /* CONFIG_SMP */
512
513 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
514 {
515         int i, idle = 1;
516         const struct cpumask *span;
517
518         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
519                 return 1;
520
521         span = sched_rt_period_mask();
522         for_each_cpu(i, span) {
523                 int enqueue = 0;
524                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
525                 struct rq *rq = rq_of_rt_rq(rt_rq);
526
527                 spin_lock(&rq->lock);
528                 if (rt_rq->rt_time) {
529                         u64 runtime;
530
531                         spin_lock(&rt_rq->rt_runtime_lock);
532                         if (rt_rq->rt_throttled)
533                                 balance_runtime(rt_rq);
534                         runtime = rt_rq->rt_runtime;
535                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
536                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
537                                 rt_rq->rt_throttled = 0;
538                                 enqueue = 1;
539                         }
540                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
541                                 idle = 0;
542                         spin_unlock(&rt_rq->rt_runtime_lock);
543                 } else if (rt_rq->rt_nr_running)
544                         idle = 0;
545
546                 if (enqueue)
547                         sched_rt_rq_enqueue(rt_rq);
548                 spin_unlock(&rq->lock);
549         }
550
551         return idle;
552 }
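/*
 * In short, the period timer handler above refills bandwidth once per
 * rt_period: each rt_rq in the period mask has 'overrun * runtime'
 * subtracted from its accumulated rt_time, throttled rt_rqs whose
 * rt_time has dropped below their runtime are unthrottled and
 * re-enqueued, and the return value reports whether every rt_rq went
 * idle so the caller can decide whether to keep the timer running.
 */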
553
554 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
555 {
556 #ifdef CONFIG_RT_GROUP_SCHED
557         struct rt_rq *rt_rq = group_rt_rq(rt_se);
558
559         if (rt_rq)
560                 return rt_rq->highest_prio.curr;
561 #endif
562
563         return rt_task_of(rt_se)->prio;
564 }
565
566 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
567 {
568         u64 runtime = sched_rt_runtime(rt_rq);
569
570         if (rt_rq->rt_throttled)
571                 return rt_rq_throttled(rt_rq);
572
573         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
574                 return 0;
575
576         balance_runtime(rt_rq);
577         runtime = sched_rt_runtime(rt_rq);
578         if (runtime == RUNTIME_INF)
579                 return 0;
580
581         if (rt_rq->rt_time > runtime) {
582                 rt_rq->rt_throttled = 1;
583                 if (rt_rq_throttled(rt_rq)) {
584                         sched_rt_rq_dequeue(rt_rq);
585                         return 1;
586                 }
587         }
588
589         return 0;
590 }
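/*
 * Throttling example (illustrative, assuming the usual defaults of
 * roughly 0.95s of RT runtime per 1s period): once this rt_rq has
 * accumulated more than 'runtime' worth of rt_time within the current
 * period, it is marked throttled and dequeued so non-RT tasks get the
 * remainder of the period; the period timer above clears the throttle
 * once rt_time has been paid back.
 */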
591
592 /*
593  * Update the current task's runtime statistics. Skip current tasks that
594  * are not in our scheduling class.
595  */
596 static void update_curr_rt(struct rq *rq)
597 {
598         struct task_struct *curr = rq->curr;
599         struct sched_rt_entity *rt_se = &curr->rt;
600         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
601         u64 delta_exec;
602
603         if (!task_has_rt_policy(curr))
604                 return;
605
606         delta_exec = rq->clock - curr->se.exec_start;
607         if (unlikely((s64)delta_exec < 0))
608                 delta_exec = 0;
609
610         schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
611
612         curr->se.sum_exec_runtime += delta_exec;
613         account_group_exec_runtime(curr, delta_exec);
614
615         curr->se.exec_start = rq->clock;
616         cpuacct_charge(curr, delta_exec);
617
618         if (!rt_bandwidth_enabled())
619                 return;
620
621         for_each_sched_rt_entity(rt_se) {
622                 rt_rq = rt_rq_of_se(rt_se);
623
624                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
625                         spin_lock(&rt_rq->rt_runtime_lock);
626                         rt_rq->rt_time += delta_exec;
627                         if (sched_rt_runtime_exceeded(rt_rq))
628                                 resched_task(curr);
629                         spin_unlock(&rt_rq->rt_runtime_lock);
630                 }
631         }
632 }
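/*
 * Note that the bandwidth charging loop above walks rt_se upwards via
 * for_each_sched_rt_entity(), so with CONFIG_RT_GROUP_SCHED a task's
 * execution time is accounted against its own rt_rq and against every
 * ancestor group's rt_rq; any of them exceeding its runtime reschedules
 * the current task.
 */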
633
634 #if defined CONFIG_SMP
635
636 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
637
638 static inline int next_prio(struct rq *rq)
639 {
640         struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
641
642         if (next && rt_prio(next->prio))
643                 return next->prio;
644         else
645                 return MAX_RT_PRIO;
646 }
647
648 static void
649 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
650 {
651         struct rq *rq = rq_of_rt_rq(rt_rq);
652
653         if (prio < prev_prio) {
654
655                 /*
656                  * If the new task is higher in priority than anything on the
657                  * run-queue, we know that the previous high becomes our
658                  * next-highest.
659                  */
660                 rt_rq->highest_prio.next = prev_prio;
661
662                 if (rq->online)
663                         cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
664
665         } else if (prio == rt_rq->highest_prio.curr)
666                 /*
667                  * If the next task is equal in priority to the highest on
668                  * the run-queue, then we implicitly know that the next highest
669                  * task cannot be any lower than current
670                  */
671                 rt_rq->highest_prio.next = prio;
672         else if (prio < rt_rq->highest_prio.next)
673                 /*
674                  * Otherwise, we need to recompute next-highest
675                  */
676                 rt_rq->highest_prio.next = next_prio(rq);
677 }
678
679 static void
680 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
681 {
682         struct rq *rq = rq_of_rt_rq(rt_rq);
683
684         if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
685                 rt_rq->highest_prio.next = next_prio(rq);
686
687         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
688                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
689 }
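/*
 * highest_prio caching, in brief: .curr is the best (numerically lowest)
 * priority queued on this rt_rq and .next the second best; they are kept
 * in sync here so push/pull decisions and cpupri_set() updates can be
 * made without scanning the run list, falling back to next_prio() or
 * sched_find_first_bit() only when the cached values go stale.
 */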
690
691 #else /* CONFIG_SMP */
692
693 static inline
694 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
695 static inline
696 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
697
698 #endif /* CONFIG_SMP */
699
700 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
701 static void
702 inc_rt_prio(struct rt_rq *rt_rq, int prio)
703 {
704         int prev_prio = rt_rq->highest_prio.curr;
705
706         if (prio < prev_prio)
707                 rt_rq->highest_prio.curr = prio;
708
709         inc_rt_prio_smp(rt_rq, prio, prev_prio);
710 }
711
712 static void
713 dec_rt_prio(struct rt_rq *rt_rq, int prio)
714 {
715         int prev_prio = rt_rq->highest_prio.curr;
716
717         if (rt_rq->rt_nr_running) {
718
719                 WARN_ON(prio < prev_prio);
720
721                 /*
722                  * This may have been our highest task, and therefore
723                  * we may have some recomputation to do
724                  */
725                 if (prio == prev_prio) {
726                         struct rt_prio_array *array = &rt_rq->active;
727
728                         rt_rq->highest_prio.curr =
729                                 sched_find_first_bit(array->bitmap);
730                 }
731
732         } else
733                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
734
735         dec_rt_prio_smp(rt_rq, prio, prev_prio);
736 }
737
738 #else
739
740 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
741 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
742
743 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
744
745 #ifdef CONFIG_RT_GROUP_SCHED
746
747 static void
748 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
749 {
750         if (rt_se_boosted(rt_se))
751                 rt_rq->rt_nr_boosted++;
752
753         if (rt_rq->tg)
754                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
755 }
756
757 static void
758 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
759 {
760         if (rt_se_boosted(rt_se))
761                 rt_rq->rt_nr_boosted--;
762
763         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
764 }
765
766 #else /* CONFIG_RT_GROUP_SCHED */
767
768 static void
769 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
770 {
771         start_rt_bandwidth(&def_rt_bandwidth);
772 }
773
774 static inline
775 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
776
777 #endif /* CONFIG_RT_GROUP_SCHED */
778
779 static inline
780 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
781 {
782         int prio = rt_se_prio(rt_se);
783
784         WARN_ON(!rt_prio(prio));
785         rt_rq->rt_nr_running++;
786
787         inc_rt_prio(rt_rq, prio);
788         inc_rt_migration(rt_se, rt_rq);
789         inc_rt_group(rt_se, rt_rq);
790 }
791
792 static inline
793 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
794 {
795         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
796         WARN_ON(!rt_rq->rt_nr_running);
797         rt_rq->rt_nr_running--;
798
799         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
800         dec_rt_migration(rt_se, rt_rq);
801         dec_rt_group(rt_se, rt_rq);
802 }
803
804 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
805 {
806         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
807         struct rt_prio_array *array = &rt_rq->active;
808         struct rt_rq *group_rq = group_rt_rq(rt_se);
809         struct list_head *queue = array->queue + rt_se_prio(rt_se);
810
811         /*
812          * Don't enqueue the group if it's throttled, or when empty.
813          * The latter is a consequence of the former when a child group
814          * gets throttled and the current group doesn't have any other
815          * active members.
816          */
817         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
818                 return;
819
820         list_add_tail(&rt_se->run_list, queue);
821         __set_bit(rt_se_prio(rt_se), array->bitmap);
822
823         inc_rt_tasks(rt_se, rt_rq);
824 }
825
826 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
827 {
828         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
829         struct rt_prio_array *array = &rt_rq->active;
830
831         list_del_init(&rt_se->run_list);
832         if (list_empty(array->queue + rt_se_prio(rt_se)))
833                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
834
835         dec_rt_tasks(rt_se, rt_rq);
836 }
837
838 /*
839  * Because the prio of an upper entry depends on the lower
840  * entries, we must remove entries top-down.
841  */
842 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
843 {
844         struct sched_rt_entity *back = NULL;
845
846         for_each_sched_rt_entity(rt_se) {
847                 rt_se->back = back;
848                 back = rt_se;
849         }
850
851         for (rt_se = back; rt_se; rt_se = rt_se->back) {
852                 if (on_rt_rq(rt_se))
853                         __dequeue_rt_entity(rt_se);
854         }
855 }
856
857 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
858 {
859         dequeue_rt_stack(rt_se);
860         for_each_sched_rt_entity(rt_se)
861                 __enqueue_rt_entity(rt_se);
862 }
863
864 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
865 {
866         dequeue_rt_stack(rt_se);
867
868         for_each_sched_rt_entity(rt_se) {
869                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
870
871                 if (rt_rq && rt_rq->rt_nr_running)
872                         __enqueue_rt_entity(rt_se);
873         }
874 }
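/*
 * Both operations above first dequeue the whole stack top-down via
 * dequeue_rt_stack() and then rebuild it bottom-up: enqueue_rt_entity()
 * re-adds every level, while dequeue_rt_entity() only re-adds ancestor
 * group entities that still have other runnable entities, so empty
 * groups drop out of their parents' run lists.
 */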
875
876 /*
877  * Adding/removing a task to/from a priority array:
878  */
879 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
880 {
881         struct sched_rt_entity *rt_se = &p->rt;
882
883         if (wakeup)
884                 rt_se->timeout = 0;
885
886         enqueue_rt_entity(rt_se);
887
888         if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
889                 enqueue_pushable_task(rq, p);
890
891         inc_cpu_load(rq, p->se.load.weight);
892 }
893
894 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
895 {
896         struct sched_rt_entity *rt_se = &p->rt;
897
898         update_curr_rt(rq);
899         dequeue_rt_entity(rt_se);
900
901         dequeue_pushable_task(rq, p);
902
903         dec_cpu_load(rq, p->se.load.weight);
904 }
905
906 /*
907  * Put the task at the end of the run list without the overhead of dequeue
908  * followed by enqueue.
909  */
910 static void
911 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
912 {
913         if (on_rt_rq(rt_se)) {
914                 struct rt_prio_array *array = &rt_rq->active;
915                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
916
917                 if (head)
918                         list_move(&rt_se->run_list, queue);
919                 else
920                         list_move_tail(&rt_se->run_list, queue);
921         }
922 }
923
924 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
925 {
926         struct sched_rt_entity *rt_se = &p->rt;
927         struct rt_rq *rt_rq;
928
929         for_each_sched_rt_entity(rt_se) {
930                 rt_rq = rt_rq_of_se(rt_se);
931                 requeue_rt_entity(rt_rq, rt_se, head);
932         }
933 }
934
935 static void yield_task_rt(struct rq *rq)
936 {
937         requeue_task_rt(rq, rq->curr, 0);
938 }
939
940 #ifdef CONFIG_SMP
941 static int find_lowest_rq(struct task_struct *task);
942
943 static int select_task_rq_rt(struct task_struct *p, int sync)
944 {
945         struct rq *rq = task_rq(p);
946
947         /*
948          * If the current task is an RT task, then
949          * try to see if we can wake this RT task up on another
950          * runqueue. Otherwise simply start this RT task
951          * on its current runqueue.
952          *
953          * We want to avoid overloading runqueues, even if
954          * the RT task is of higher priority than the current RT task.
955          * RT tasks behave differently than other tasks. If
956          * one gets preempted, we try to push it off to another queue.
957          * So trying to keep a preempting RT task on the same
958          * cache hot CPU will force the running RT task to
959          * a cold CPU. So we waste all the cache for the lower-priority
960          * RT task in hopes of saving some cache for an RT task
961          * that is just being woken and probably will have a
962          * cold cache anyway.
963          */
964         if (unlikely(rt_task(rq->curr)) &&
965             (p->rt.nr_cpus_allowed > 1)) {
966                 int cpu = find_lowest_rq(p);
967
968                 return (cpu == -1) ? task_cpu(p) : cpu;
969         }
970
971         /*
972          * Otherwise, just let it ride on the affined RQ and the
973          * post-schedule router will push the preempted task away
974          */
975         return task_cpu(p);
976 }
977
978 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
979 {
980         if (rq->curr->rt.nr_cpus_allowed == 1)
981                 return;
982
983         if (p->rt.nr_cpus_allowed != 1
984             && cpupri_find(&rq->rd->cpupri, p, NULL))
985                 return;
986
987         if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
988                 return;
989
990         /*
991          * There appear to be other CPUs that can accept
992          * current and none to run 'p', so let's reschedule
993          * to try and push current away:
994          */
995         requeue_task_rt(rq, p, 1);
996         resched_task(rq->curr);
997 }
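/*
 * The equal-priority case above reads as: if current is pinned to this
 * CPU there is nothing to gain; if the waking task p could itself run
 * elsewhere (cpupri_find() succeeds for p) we leave things alone; only
 * when p is stuck here and current has somewhere else to go do we
 * requeue p at the head and reschedule, letting the push logic move
 * current away.
 */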
998
999 #endif /* CONFIG_SMP */
1000
1001 /*
1002  * Preempt the current task with a newly woken task if needed:
1003  */
1004 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
1005 {
1006         if (p->prio < rq->curr->prio) {
1007                 resched_task(rq->curr);
1008                 return;
1009         }
1010
1011 #ifdef CONFIG_SMP
1012         /*
1013          * If:
1014          *
1015          * - the newly woken task is of equal priority to the current task
1016          * - the newly woken task is non-migratable while current is migratable
1017          * - current will be preempted on the next reschedule
1018          *
1019          * we should check to see if current can readily move to a different
1020          * cpu.  If so, we will reschedule to allow the push logic to try
1021          * to move current somewhere else, making room for our non-migratable
1022          * task.
1023          */
1024         if (p->prio == rq->curr->prio && !need_resched())
1025                 check_preempt_equal_prio(rq, p);
1026 #endif
1027 }
1028
1029 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1030                                                    struct rt_rq *rt_rq)
1031 {
1032         struct rt_prio_array *array = &rt_rq->active;
1033         struct sched_rt_entity *next = NULL;
1034         struct list_head *queue;
1035         int idx;
1036
1037         idx = sched_find_first_bit(array->bitmap);
1038         BUG_ON(idx >= MAX_RT_PRIO);
1039
1040         queue = array->queue + idx;
1041         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1042
1043         return next;
1044 }
1045
1046 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1047 {
1048         struct sched_rt_entity *rt_se;
1049         struct task_struct *p;
1050         struct rt_rq *rt_rq;
1051
1052         rt_rq = &rq->rt;
1053
1054         if (unlikely(!rt_rq->rt_nr_running))
1055                 return NULL;
1056
1057         if (rt_rq_throttled(rt_rq))
1058                 return NULL;
1059
1060         do {
1061                 rt_se = pick_next_rt_entity(rq, rt_rq);
1062                 BUG_ON(!rt_se);
1063                 rt_rq = group_rt_rq(rt_se);
1064         } while (rt_rq);
1065
1066         p = rt_task_of(rt_se);
1067         p->se.exec_start = rq->clock;
1068
1069         return p;
1070 }
1071
1072 static struct task_struct *pick_next_task_rt(struct rq *rq)
1073 {
1074         struct task_struct *p = _pick_next_task_rt(rq);
1075
1076         /* The running task is never eligible for pushing */
1077         if (p)
1078                 dequeue_pushable_task(rq, p);
1079
1080 #ifdef CONFIG_SMP
1081         /*
1082          * We detect this state here so that we can avoid taking the RQ
1083          * lock again later if there is no need to push
1084          */
1085         rq->post_schedule = has_pushable_tasks(rq);
1086 #endif
1087
1088         return p;
1089 }
1090
1091 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1092 {
1093         update_curr_rt(rq);
1094         p->se.exec_start = 0;
1095
1096         /*
1097          * The previous task needs to be made eligible for pushing
1098          * if it is still active
1099          */
1100         if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
1101                 enqueue_pushable_task(rq, p);
1102 }
1103
1104 #ifdef CONFIG_SMP
1105
1106 /* Only try algorithms three times */
1107 #define RT_MAX_TRIES 3
1108
1109 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
1110
1111 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1112 {
1113         if (!task_running(rq, p) &&
1114             (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
1115             (p->rt.nr_cpus_allowed > 1))
1116                 return 1;
1117         return 0;
1118 }
1119
1120 /* Return the second highest RT task, NULL otherwise */
1121 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1122 {
1123         struct task_struct *next = NULL;
1124         struct sched_rt_entity *rt_se;
1125         struct rt_prio_array *array;
1126         struct rt_rq *rt_rq;
1127         int idx;
1128
1129         for_each_leaf_rt_rq(rt_rq, rq) {
1130                 array = &rt_rq->active;
1131                 idx = sched_find_first_bit(array->bitmap);
1132  next_idx:
1133                 if (idx >= MAX_RT_PRIO)
1134                         continue;
1135                 if (next && next->prio < idx)
1136                         continue;
1137                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
1138                         struct task_struct *p = rt_task_of(rt_se);
1139                         if (pick_rt_task(rq, p, cpu)) {
1140                                 next = p;
1141                                 break;
1142                         }
1143                 }
1144                 if (!next) {
1145                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1146                         goto next_idx;
1147                 }
1148         }
1149
1150         return next;
1151 }
1152
1153 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1154
1155 static inline int pick_optimal_cpu(int this_cpu,
1156                                    const struct cpumask *mask)
1157 {
1158         int first;
1159
1160         /* "this_cpu" is cheaper to preempt than a remote processor */
1161         if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
1162                 return this_cpu;
1163
1164         first = cpumask_first(mask);
1165         if (first < nr_cpu_ids)
1166                 return first;
1167
1168         return -1;
1169 }
1170
1171 static int find_lowest_rq(struct task_struct *task)
1172 {
1173         struct sched_domain *sd;
1174         struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1175         int this_cpu = smp_processor_id();
1176         int cpu      = task_cpu(task);
1177         cpumask_var_t domain_mask;
1178
1179         if (task->rt.nr_cpus_allowed == 1)
1180                 return -1; /* No other targets possible */
1181
1182         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1183                 return -1; /* No targets found */
1184
1185         /*
1186          * At this point we have built a mask of cpus representing the
1187          * lowest priority tasks in the system.  Now we want to elect
1188          * the best one based on our affinity and topology.
1189          *
1190          * We prioritize the last cpu that the task executed on since
1191          * it is most likely cache-hot in that location.
1192          */
1193         if (cpumask_test_cpu(cpu, lowest_mask))
1194                 return cpu;
1195
1196         /*
1197          * Otherwise, we consult the sched_domains span maps to figure
1198          * out which cpu is logically closest to our hot cache data.
1199          */
1200         if (this_cpu == cpu)
1201                 this_cpu = -1; /* Skip this_cpu opt if the same */
1202
1203         if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
1204                 for_each_domain(cpu, sd) {
1205                         if (sd->flags & SD_WAKE_AFFINE) {
1206                                 int best_cpu;
1207
1208                                 cpumask_and(domain_mask,
1209                                             sched_domain_span(sd),
1210                                             lowest_mask);
1211
1212                                 best_cpu = pick_optimal_cpu(this_cpu,
1213                                                             domain_mask);
1214
1215                                 if (best_cpu != -1) {
1216                                         free_cpumask_var(domain_mask);
1217                                         return best_cpu;
1218                                 }
1219                         }
1220                 }
1221                 free_cpumask_var(domain_mask);
1222         }
1223
1224         /*
1225          * And finally, if there were no matches within the domains
1226          * just give the caller *something* to work with from the compatible
1227          * locations.
1228          */
1229         return pick_optimal_cpu(this_cpu, lowest_mask);
1230 }
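/*
 * Selection order above, in summary: cpupri supplies a mask of CPUs
 * currently running at a priority level this task would beat; within
 * that mask we prefer the task's previous CPU (likely cache-hot), then
 * the closest CPU found by walking the SD_WAKE_AFFINE sched domains
 * (preferring this_cpu when it qualifies), and finally any CPU left in
 * the mask, or -1 if cpupri found no candidates at all.
 */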
1231
1232 /* Will lock the rq it finds */
1233 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1234 {
1235         struct rq *lowest_rq = NULL;
1236         int tries;
1237         int cpu;
1238
1239         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1240                 cpu = find_lowest_rq(task);
1241
1242                 if ((cpu == -1) || (cpu == rq->cpu))
1243                         break;
1244
1245                 lowest_rq = cpu_rq(cpu);
1246
1247                 /* if the prio of this runqueue changed, try again */
1248                 if (double_lock_balance(rq, lowest_rq)) {
1249                         /*
1250                  * We had to unlock the run queue. In the
1251                  * meantime, the task could have migrated
1252                  * already or had its affinity changed.
1253                          * Also make sure that it wasn't scheduled on its rq.
1254                          */
1255                         if (unlikely(task_rq(task) != rq ||
1256                                      !cpumask_test_cpu(lowest_rq->cpu,
1257                                                        &task->cpus_allowed) ||
1258                                      task_running(rq, task) ||
1259                                      !task->se.on_rq)) {
1260
1261                                 spin_unlock(&lowest_rq->lock);
1262                                 lowest_rq = NULL;
1263                                 break;
1264                         }
1265                 }
1266
1267                 /* If this rq is still suitable use it. */
1268                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1269                         break;
1270
1271                 /* try again */
1272                 double_unlock_balance(rq, lowest_rq);
1273                 lowest_rq = NULL;
1274         }
1275
1276         return lowest_rq;
1277 }
1278
1279 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1280 {
1281         struct task_struct *p;
1282
1283         if (!has_pushable_tasks(rq))
1284                 return NULL;
1285
1286         p = plist_first_entry(&rq->rt.pushable_tasks,
1287                               struct task_struct, pushable_tasks);
1288
1289         BUG_ON(rq->cpu != task_cpu(p));
1290         BUG_ON(task_current(rq, p));
1291         BUG_ON(p->rt.nr_cpus_allowed <= 1);
1292
1293         BUG_ON(!p->se.on_rq);
1294         BUG_ON(!rt_task(p));
1295
1296         return p;
1297 }
1298
1299 /*
1300  * If the current CPU has more than one RT task, see if the non
1301  * running task can migrate over to a CPU that is running a task
1302  * of lesser priority.
1303  */
1304 static int push_rt_task(struct rq *rq)
1305 {
1306         struct task_struct *next_task;
1307         struct rq *lowest_rq;
1308
1309         if (!rq->rt.overloaded)
1310                 return 0;
1311
1312         next_task = pick_next_pushable_task(rq);
1313         if (!next_task)
1314                 return 0;
1315
1316  retry:
1317         if (unlikely(next_task == rq->curr)) {
1318                 WARN_ON(1);
1319                 return 0;
1320         }
1321
1322         /*
1323          * It's possible that the next_task slipped in with a
1324          * higher priority than current. If that's the case,
1325          * just reschedule current.
1326          */
1327         if (unlikely(next_task->prio < rq->curr->prio)) {
1328                 resched_task(rq->curr);
1329                 return 0;
1330         }
1331
1332         /* We might release rq lock */
1333         get_task_struct(next_task);
1334
1335         /* find_lock_lowest_rq locks the rq if found */
1336         lowest_rq = find_lock_lowest_rq(next_task, rq);
1337         if (!lowest_rq) {
1338                 struct task_struct *task;
1339                 /*
1340                  * find_lock_lowest_rq releases rq->lock
1341                  * so it is possible that next_task has migrated.
1342                  *
1343                  * We need to make sure that the task is still on the same
1344                  * run-queue and is also still the next task eligible for
1345                  * pushing.
1346                  */
1347                 task = pick_next_pushable_task(rq);
1348                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1349                         /*
1350                  * If we get here, the task hasn't moved at all, but
1351                          * it has failed to push.  We will not try again,
1352                          * since the other cpus will pull from us when they
1353                          * are ready.
1354                          */
1355                         dequeue_pushable_task(rq, next_task);
1356                         goto out;
1357                 }
1358
1359                 if (!task)
1360                         /* No more tasks, just exit */
1361                         goto out;
1362
1363                 /*
1364                  * Something has shifted, try again.
1365                  */
1366                 put_task_struct(next_task);
1367                 next_task = task;
1368                 goto retry;
1369         }
1370
1371         deactivate_task(rq, next_task, 0);
1372         set_task_cpu(next_task, lowest_rq->cpu);
1373         activate_task(lowest_rq, next_task, 0);
1374
1375         resched_task(lowest_rq->curr);
1376
1377         double_unlock_balance(rq, lowest_rq);
1378
1379 out:
1380         put_task_struct(next_task);
1381
1382         return 1;
1383 }
1384
1385 static void push_rt_tasks(struct rq *rq)
1386 {
1387         /* push_rt_task will return true if it moved an RT */
1388         while (push_rt_task(rq))
1389                 ;
1390 }
1391
1392 static int pull_rt_task(struct rq *this_rq)
1393 {
1394         int this_cpu = this_rq->cpu, ret = 0, cpu;
1395         struct task_struct *p;
1396         struct rq *src_rq;
1397
1398         if (likely(!rt_overloaded(this_rq)))
1399                 return 0;
1400
1401         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1402                 if (this_cpu == cpu)
1403                         continue;
1404
1405                 src_rq = cpu_rq(cpu);
1406
1407                 /*
1408                  * Don't bother taking the src_rq->lock if the next highest
1409                  * task is known to be lower-priority than our current task.
1410                  * This may look racy, but if this value is about to go
1411                  * logically higher, the src_rq will push this task away.
1412                  * And if it's going logically lower, we do not care.
1413                  */
1414                 if (src_rq->rt.highest_prio.next >=
1415                     this_rq->rt.highest_prio.curr)
1416                         continue;
1417
1418                 /*
1419                  * We can potentially drop this_rq's lock in
1420                  * double_lock_balance, and another CPU could
1421                  * alter this_rq
1422                  */
1423                 double_lock_balance(this_rq, src_rq);
1424
1425                 /*
1426                  * Are there still pullable RT tasks?
1427                  */
1428                 if (src_rq->rt.rt_nr_running <= 1)
1429                         goto skip;
1430
1431                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1432
1433                 /*
1434                  * Do we have an RT task that preempts
1435                  * the to-be-scheduled task?
1436                  */
1437                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1438                         WARN_ON(p == src_rq->curr);
1439                         WARN_ON(!p->se.on_rq);
1440
1441                         /*
1442                          * There's a chance that p is higher in priority
1443                          * than what's currently running on its cpu.
1444                  * This is just that p is waking up and hasn't
1445                          * had a chance to schedule. We only pull
1446                          * p if it is lower in priority than the
1447                          * current task on the run queue
1448                          */
1449                         if (p->prio < src_rq->curr->prio)
1450                                 goto skip;
1451
1452                         ret = 1;
1453
1454                         deactivate_task(src_rq, p, 0);
1455                         set_task_cpu(p, this_cpu);
1456                         activate_task(this_rq, p, 0);
1457                         /*
1458                          * We continue with the search, just in
1459                          * case there's an even higher prio task
1460                  * in another runqueue. (low likelihood
1461                          * but possible)
1462                          */
1463                 }
1464  skip:
1465                 double_unlock_balance(this_rq, src_rq);
1466         }
1467
1468         return ret;
1469 }
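/*
 * Pulling, in summary: for every overloaded rq in rd->rto_mask we look
 * at its next-highest runnable RT task, and only take the rq->lock when
 * that task would beat whatever this_rq is about to run.  The pulled
 * task must not itself be higher priority than the source rq's current
 * task (otherwise it is just waking up and about to run there anyway),
 * and the scan keeps going in case a later rq holds an even better
 * candidate.
 */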
1470
1471 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1472 {
1473         /* Try to pull RT tasks here if we lower this rq's prio */
1474         if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
1475                 pull_rt_task(rq);
1476 }
1477
1478 static void post_schedule_rt(struct rq *rq)
1479 {
1480         push_rt_tasks(rq);
1481 }
1482
1483 /*
1484  * If we are not running and we are not going to reschedule soon, we should
1485  * try to push tasks away now
1486  */
1487 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1488 {
1489         if (!task_running(rq, p) &&
1490             !test_tsk_need_resched(rq->curr) &&
1491             has_pushable_tasks(rq) &&
1492             p->rt.nr_cpus_allowed > 1)
1493                 push_rt_tasks(rq);
1494 }
1495
1496 static unsigned long
1497 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1498                 unsigned long max_load_move,
1499                 struct sched_domain *sd, enum cpu_idle_type idle,
1500                 int *all_pinned, int *this_best_prio)
1501 {
1502         /* don't touch RT tasks */
1503         return 0;
1504 }
1505
1506 static int
1507 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1508                  struct sched_domain *sd, enum cpu_idle_type idle)
1509 {
1510         /* don't touch RT tasks */
1511         return 0;
1512 }
1513
1514 static void set_cpus_allowed_rt(struct task_struct *p,
1515                                 const struct cpumask *new_mask)
1516 {
1517         int weight = cpumask_weight(new_mask);
1518
1519         BUG_ON(!rt_task(p));
1520
1521         /*
1522          * Update the migration status of the RQ if we have an RT task
1523          * which is running AND changing its weight value.
1524          */
1525         if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1526                 struct rq *rq = task_rq(p);
1527
1528                 if (!task_current(rq, p)) {
1529                         /*
1530                          * Make sure we dequeue this task from the pushable list
1531                          * before going further.  It will either remain off of
1532                          * the list because we are no longer pushable, or it
1533                          * will be requeued.
1534                          */
1535                         if (p->rt.nr_cpus_allowed > 1)
1536                                 dequeue_pushable_task(rq, p);
1537
1538                         /*
1539                          * Requeue if our weight is changing and still > 1
1540                          */
1541                         if (weight > 1)
1542                                 enqueue_pushable_task(rq, p);
1543
1544                 }
1545
1546                 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1547                         rq->rt.rt_nr_migratory++;
1548                 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1549                         BUG_ON(!rq->rt.rt_nr_migratory);
1550                         rq->rt.rt_nr_migratory--;
1551                 }
1552
1553                 update_rt_migration(&rq->rt);
1554         }
1555
1556         cpumask_copy(&p->cpus_allowed, new_mask);
1557         p->rt.nr_cpus_allowed = weight;
1558 }
1559
1560 /* Assumes rq->lock is held */
1561 static void rq_online_rt(struct rq *rq)
1562 {
1563         if (rq->rt.overloaded)
1564                 rt_set_overload(rq);
1565
1566         __enable_runtime(rq);
1567
1568         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1569 }
1570
1571 /* Assumes rq->lock is held */
1572 static void rq_offline_rt(struct rq *rq)
1573 {
1574         if (rq->rt.overloaded)
1575                 rt_clear_overload(rq);
1576
1577         __disable_runtime(rq);
1578
1579         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1580 }
1581
1582 /*
1583  * When we switch away from the rt queue, we bring ourselves to a position
1584  * where we might want to pull RT tasks from other runqueues.
1585  */
1586 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1587                            int running)
1588 {
1589         /*
1590          * If there are other RT tasks then we will reschedule
1591          * and the scheduling of the other RT tasks will handle
1592          * the balancing. But if we are the last RT task
1593          * we may need to handle the pulling of RT tasks
1594          * now.
1595          */
1596         if (!rq->rt.rt_nr_running)
1597                 pull_rt_task(rq);
1598 }
1599
1600 static inline void init_sched_rt_class(void)
1601 {
1602         unsigned int i;
1603
1604         for_each_possible_cpu(i)
1605                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1606                                         GFP_KERNEL, cpu_to_node(i));
1607 }
1608 #endif /* CONFIG_SMP */
1609
1610 /*
1611  * When switching a task to RT, we may overload the runqueue
1612  * with RT tasks. In this case we try to push them off to
1613  * other runqueues.
1614  */
1615 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1616                            int running)
1617 {
1618         int check_resched = 1;
1619
1620         /*
1621          * If we are already running, then there's nothing
1622          * that needs to be done. But if we are not running
1623          * we may need to preempt the current running task.
1624          * If that current running task is also an RT task
1625          * then see if we can move to another run queue.
1626          */
1627         if (!running) {
1628 #ifdef CONFIG_SMP
1629                 if (rq->rt.overloaded && push_rt_task(rq) &&
1630                     /* Don't resched if we changed runqueues */
1631                     rq != task_rq(p))
1632                         check_resched = 0;
1633 #endif /* CONFIG_SMP */
1634                 if (check_resched && p->prio < rq->curr->prio)
1635                         resched_task(rq->curr);
1636         }
1637 }
1638
1639 /*
1640  * Priority of the task has changed. This may cause
1641  * us to initiate a push or pull.
1642  */
1643 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1644                             int oldprio, int running)
1645 {
1646         if (running) {
1647 #ifdef CONFIG_SMP
1648                 /*
1649                  * If our priority decreases while running, we
1650                  * may need to pull tasks to this runqueue.
1651                  */
1652                 if (oldprio < p->prio)
1653                         pull_rt_task(rq);
1654                 /*
1655                  * If there's a higher priority task waiting to run
1656                  * then reschedule. Note, the above pull_rt_task
1657                  * can release the rq lock and p could migrate.
1658                  * Only reschedule if p is still on the same runqueue.
1659                  */
1660                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1661                         resched_task(p);
1662 #else
1663                 /* For UP simply resched on drop of prio */
1664                 if (oldprio < p->prio)
1665                         resched_task(p);
1666 #endif /* CONFIG_SMP */
1667         } else {
1668                 /*
1669                  * This task is not running, but if it is
1670          * of higher priority than the current running task
1671                  * then reschedule.
1672                  */
1673                 if (p->prio < rq->curr->prio)
1674                         resched_task(rq->curr);
1675         }
1676 }
1677
1678 static void watchdog(struct rq *rq, struct task_struct *p)
1679 {
1680         unsigned long soft, hard;
1681
1682         if (!p->signal)
1683                 return;
1684
1685         soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1686         hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1687
1688         if (soft != RLIM_INFINITY) {
1689                 unsigned long next;
1690
1691                 p->rt.timeout++;
1692                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1693                 if (p->rt.timeout > next)
1694                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1695         }
1696 }
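/*
 * The watchdog above implements RLIMIT_RTTIME: the rlimit values are in
 * microseconds, p->rt.timeout counts scheduler ticks, and the division
 * converts the soft limit into ticks.  Once the task has run past the
 * soft limit without blocking (the timeout is reset on wakeup),
 * cputime_expires.sched_exp is armed so the cpu-timer code can act on it
 * (typically SIGXCPU, and SIGKILL once the hard limit is exceeded).
 */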
1697
1698 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1699 {
1700         update_curr_rt(rq);
1701
1702         watchdog(rq, p);
1703
1704         /*
1705          * RR tasks need a special form of timeslice management.
1706          * FIFO tasks have no timeslices.
1707          */
1708         if (p->policy != SCHED_RR)
1709                 return;
1710
1711         if (--p->rt.time_slice)
1712                 return;
1713
1714         p->rt.time_slice = DEF_TIMESLICE;
1715
1716         /*
1717          * Requeue to the end of queue if we are not the only element
1718          * on the queue:
1719          */
1720         if (p->rt.run_list.prev != p->rt.run_list.next) {
1721                 requeue_task_rt(rq, p, 0);
1722                 set_tsk_need_resched(p);
1723         }
1724 }
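/*
 * Timeslice handling above only applies to SCHED_RR: each tick decrements
 * p->rt.time_slice and, when it reaches zero, the slice is reset to
 * DEF_TIMESLICE and the task is moved to the tail of its priority queue
 * (provided another task of the same priority is queued), giving
 * round-robin rotation.  SCHED_FIFO tasks run until they block, yield or
 * are preempted by a higher priority task.
 */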
1725
1726 static void set_curr_task_rt(struct rq *rq)
1727 {
1728         struct task_struct *p = rq->curr;
1729
1730         p->se.exec_start = rq->clock;
1731
1732         /* The running task is never eligible for pushing */
1733         dequeue_pushable_task(rq, p);
1734 }
1735
1736 static const struct sched_class rt_sched_class = {
1737         .next                   = &fair_sched_class,
1738         .enqueue_task           = enqueue_task_rt,
1739         .dequeue_task           = dequeue_task_rt,
1740         .yield_task             = yield_task_rt,
1741
1742         .check_preempt_curr     = check_preempt_curr_rt,
1743
1744         .pick_next_task         = pick_next_task_rt,
1745         .put_prev_task          = put_prev_task_rt,
1746
1747 #ifdef CONFIG_SMP
1748         .select_task_rq         = select_task_rq_rt,
1749
1750         .load_balance           = load_balance_rt,
1751         .move_one_task          = move_one_task_rt,
1752         .set_cpus_allowed       = set_cpus_allowed_rt,
1753         .rq_online              = rq_online_rt,
1754         .rq_offline             = rq_offline_rt,
1755         .pre_schedule           = pre_schedule_rt,
1756         .post_schedule          = post_schedule_rt,
1757         .task_wake_up           = task_wake_up_rt,
1758         .switched_from          = switched_from_rt,
1759 #endif
1760
1761         .set_curr_task          = set_curr_task_rt,
1762         .task_tick              = task_tick_rt,
1763
1764         .prio_changed           = prio_changed_rt,
1765         .switched_to            = switched_to_rt,
1766 };
1767
1768 #ifdef CONFIG_SCHED_DEBUG
1769 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1770
1771 static void print_rt_stats(struct seq_file *m, int cpu)
1772 {
1773         struct rt_rq *rt_rq;
1774
1775         rcu_read_lock();
1776         for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1777                 print_rt_rq(m, cpu, rt_rq);
1778         rcu_read_unlock();
1779 }
1780 #endif /* CONFIG_SCHED_DEBUG */
1781