sched: wake-balance fixes
kernel/sched_rt.c
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP
static cpumask_t rt_overload_mask;
static atomic_t rto_count;
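
/*
 * "RT overload" bookkeeping: rt_overload_mask holds the CPUs whose
 * runqueues currently have more than one runnable RT task, and
 * rto_count caches how many such CPUs exist so that rt_overloaded()
 * is a cheap check before we bother scanning the mask.
 */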
static inline int rt_overloaded(void)
{
        return atomic_read(&rto_count);
}

static inline cpumask_t *rt_overload(void)
{
        return &rt_overload_mask;
}

static inline void rt_set_overload(struct rq *rq)
{
        rq->rt.overloaded = 1;
        cpu_set(rq->cpu, rt_overload_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        /* the order here really doesn't matter */
        atomic_dec(&rto_count);
        cpu_clear(rq->cpu, rt_overload_mask);
        rq->rt.overloaded = 0;
}

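/*
 * A runqueue counts as overloaded only when it has more than one
 * runnable RT task and at least one of those tasks can migrate
 * elsewhere.
 */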
static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
                rt_set_overload(rq);
        else
                rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
}

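/*
 * Per-runqueue RT accounting: keep rt_nr_running, the highest queued
 * RT priority, and the count of tasks that may run on more than one
 * CPU (rt_nr_migratory) in sync whenever a task enters or leaves the
 * runqueue, then re-evaluate the overload state.
 */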
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
        if (p->prio < rq->rt.highest_prio)
                rq->rt.highest_prio = p->prio;
        if (p->nr_cpus_allowed > 1)
                rq->rt.rt_nr_migratory++;

        update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        WARN_ON(!rq->rt.rt_nr_running);
        rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
        if (rq->rt.rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(p->prio < rq->rt.highest_prio);
                if (p->prio == rq->rt.highest_prio) {
                        /* recalculate */
                        array = &rq->rt.active;
                        rq->rt.highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rq->rt.highest_prio alone */
        } else
                rq->rt.highest_prio = MAX_RT_PRIO;
        if (p->nr_cpus_allowed > 1)
                rq->rt.rt_nr_migratory--;

        update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
        inc_cpu_load(rq, p->se.load.weight);

        inc_rt_tasks(p, rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
        dec_cpu_load(rq, p->se.load.weight);

        dec_rt_tasks(p, rq);
}

/*
 * Put a task at the end of the run list without the overhead of a
 * dequeue followed by an enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

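/*
 * Called at wakeup time to choose a runqueue for p before it is
 * activated; falls back to the task's current CPU when a search is
 * not worthwhile.
 */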
static int select_task_rq_rt(struct task_struct *p, int sync)
{
        struct rq *rq = task_rq(p);

        /*
         * If the waking task will not preempt the current task on its
         * RQ, try to find a better RQ before we even activate the task.
         */
        if ((p->prio >= rq->rt.highest_prio)
            && (p->nr_cpus_allowed > 1)) {
                int cpu = find_lowest_rq(p);

                return (cpu == -1) ? task_cpu(p) : cpu;
        }

        /*
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away.
         */
        return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

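/*
 * Pick the head of the highest-priority non-empty queue; the priority
 * bitmap keeps the search cheap. Returns NULL when no RT task is
 * queued, so the core scheduler falls through to the next class.
 */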
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

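/*
 * A task is a candidate for migration if it is not the one currently
 * running, is allowed on the target CPU (a negative cpu means "any"),
 * and is not pinned to a single CPU.
 */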
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
            (p->nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second-highest pullable RT task, or NULL if there is none */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
                                                     int cpu)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        assert_spin_locked(&rq->lock);

        if (likely(rq->rt.rt_nr_running < 2))
                return NULL;

        idx = sched_find_first_bit(array->bitmap);
        if (unlikely(idx >= MAX_RT_PRIO)) {
                WARN_ON(1); /* rt_nr_running is bad */
                return NULL;
        }

        queue = array->queue + idx;
        BUG_ON(list_empty(queue));

        next = list_entry(queue->next, struct task_struct, run_list);
        if (unlikely(pick_rt_task(rq, next, cpu)))
                goto out;

        if (queue->next->next != queue) {
                /* same prio task */
                next = list_entry(queue->next->next,
                                  struct task_struct, run_list);
                if (pick_rt_task(rq, next, cpu))
                        goto out;
        }

 retry:
        /* slower, but more flexible */
        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
        if (unlikely(idx >= MAX_RT_PRIO))
                return NULL;

        queue = array->queue + idx;
        BUG_ON(list_empty(queue));

        list_for_each_entry(next, queue, run_list) {
                if (pick_rt_task(rq, next, cpu))
                        goto out;
        }

        goto retry;

 out:
        return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
static DEFINE_PER_CPU(cpumask_t, valid_cpu_mask);

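/*
 * Build a mask of the CPUs whose runqueues run at the lowest priority
 * that is still below the task's own. A CPU with no RT task at all is
 * an immediate best answer. Results land in the caller-supplied
 * scratch mask; returns 0 if no suitable CPU was found.
 */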
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
        int       cpu;
        cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
        int       lowest_prio = -1;
        int       ret         = 0;

        cpus_clear(*lowest_mask);
        cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);

        /*
         * Scan each rq for the lowest prio.
         */
        for_each_cpu_mask(cpu, *valid_mask) {
                struct rq *rq = cpu_rq(cpu);

                /* We look for lowest RT prio or non-rt CPU */
                if (rq->rt.highest_prio >= MAX_RT_PRIO) {
                        if (ret)
                                cpus_clear(*lowest_mask);
                        cpu_set(rq->cpu, *lowest_mask);
                        return 1;
                }

                /* no locking for now */
                if ((rq->rt.highest_prio > task->prio)
                    && (rq->rt.highest_prio >= lowest_prio)) {
                        if (rq->rt.highest_prio > lowest_prio) {
                                /* new low - clear old data */
                                lowest_prio = rq->rt.highest_prio;
                                cpus_clear(*lowest_mask);
                        }
                        cpu_set(rq->cpu, *lowest_mask);
                        ret = 1;
                }
        }

        return ret;
}

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
        int first;

        /* "this_cpu" is cheaper to preempt than a remote processor */
        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
                return this_cpu;

        first = first_cpu(*mask);
        if (first != NR_CPUS)
                return first;

        return -1;
}

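/*
 * Pick a target CPU for a push: prefer the task's last CPU for cache
 * warmth, then the closest CPU found by walking the wake-affine
 * sched domains, and finally anything left in the lowest mask.
 */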
static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);

        if (!find_lowest_cpus(task, lowest_mask))
                return -1;

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpu_isset(cpu, *lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        cpumask_t domain_mask;
                        int       best_cpu;

                        cpus_and(domain_mask, sd->span, *lowest_mask);

                        best_cpu = pick_optimal_cpu(this_cpu,
                                                    &domain_mask);
                        if (best_cpu != -1)
                                return best_cpu;
                }
        }

        /*
         * And finally, if there were no matches within the domains,
         * just give the caller *something* to work with from the
         * compatible locations.
         */
        return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
                                      struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int cpu;
        int tries;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In the
                         * meantime, the task could have migrated
                         * already or had its affinity changed.
                         * Also make sure that it wasn't scheduled
                         * on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpu_isset(lowest_rq->cpu,
                                                task->cpus_allowed) ||
                                     task_running(rq, task) ||
                                     !task->se.on_rq)) {
                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio > task->prio)
                        break;

                /* try again */
                spin_unlock(&lowest_rq->lock);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the
 * non-running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;
        int paranoid = RT_MAX_TRIES;

        assert_spin_locked(&rq->lock);

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;

 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that next_task slipped in at a higher
         * priority than current. If that's the case just reschedule
         * current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock, so it is
                 * possible that next_task has changed.
                 * If it has, then try again.
                 */
                task = pick_next_highest_task_rt(rq, -1);
                if (unlikely(task != next_task) && task && paranoid--) {
                        put_task_struct(next_task);
                        next_task = task;
                        goto retry;
                }
                goto out;
        }

        assert_spin_locked(&lowest_rq->lock);

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);

        resched_task(lowest_rq->curr);

        spin_unlock(&lowest_rq->lock);

        ret = 1;
out:
        put_task_struct(next_task);

        return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there are
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT */
        while (push_rt_task(rq))
                ;
}

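/*
 * The pull side of the balancer: when this runqueue's priority drops,
 * scan the overloaded runqueues and steal any queued RT task that
 * would preempt whatever we are about to run. A nonzero return means
 * the runnable set here may have changed and the caller should
 * re-pick its next task.
 */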
static int pull_rt_task(struct rq *this_rq)
{
        struct task_struct *next;
        struct task_struct *p;
        struct rq *src_rq;
        cpumask_t *rto_cpumask;
        int this_cpu = this_rq->cpu;
        int cpu;
        int ret = 0;

        assert_spin_locked(&this_rq->lock);

        /*
         * If cpusets are used, and we have overlapping
         * run queue cpusets, then this algorithm may not catch all
         * cases. This is just the price you pay for trying to keep
         * cache dirtying down on large SMP machines.
         */
        if (likely(!rt_overloaded()))
                return 0;

        next = pick_next_task_rt(this_rq);

        rto_cpumask = rt_overload();

        for_each_cpu_mask(cpu, *rto_cpumask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);
                if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
                        /*
                         * It is possible that overlapping cpusets
                         * will miss clearing a non-overloaded runqueue.
                         * Clear it now.
                         */
                        if (double_lock_balance(this_rq, src_rq)) {
                                /* unlocked our runqueue lock */
                                struct task_struct *old_next = next;
                                next = pick_next_task_rt(this_rq);
                                if (next != old_next)
                                        ret = 1;
                        }
                        if (likely(src_rq->rt.rt_nr_running <= 1))
                                /*
                                 * Small chance that this_rq->curr changed
                                 * but it's really harmless here.
                                 */
                                rt_clear_overload(this_rq);
                        else
                                /*
                                 * Heh, the src_rq is now overloaded, since
                                 * we already have the src_rq lock, go straight
                                 * to pulling tasks from it.
                                 */
                                goto try_pulling;
                        spin_unlock(&src_rq->lock);
                        continue;
                }

                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * steal our next task - hence we must cause
                 * the caller to recalculate the next task
                 * in that case:
                 */
                if (double_lock_balance(this_rq, src_rq)) {
                        struct task_struct *old_next = next;
                        next = pick_next_task_rt(this_rq);
                        if (next != old_next)
                                ret = 1;
                }

                /*
                 * Are there still pullable RT tasks?
                 */
                if (src_rq->rt.rt_nr_running <= 1) {
                        spin_unlock(&src_rq->lock);
                        continue;
                }

 try_pulling:
                p = pick_next_highest_task_rt(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (!next || (p->prio < next->prio))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);

                        /*
                         * There's a chance that p is higher in priority
                         * than what's currently running on its cpu.
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
                         * current task on its run queue, or if
                         * this_rq's next task is lower in prio than
                         * the current task on that rq.
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))
                                goto bail;

                        ret = 1;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         */

                        /*
                         * Update next so that we won't pick a task
                         * on another cpu with a priority lower than
                         * (or equal to) the one we just picked.
                         */
                        next = p;
                }
 bail:
                spin_unlock(&src_rq->lock);
        }

        return ret;
}

static void schedule_balance_rt(struct rq *rq,
                                struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) &&
            rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
}

static void schedule_tail_balance_rt(struct rq *rq)
{
        /*
         * If we have more than one rt_task queued, then
         * see if we can push the other rt_tasks off to other CPUs.
         * Note we may release the rq lock, and since
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
        if (unlikely(rq->rt.overloaded)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
        }
}

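/*
 * Push on wakeup: if the newly woken task will not be the
 * highest-priority RT task on this runqueue and the runqueue is
 * overloaded, try to push RT tasks off to other CPUs right away.
 */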
static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
        if (unlikely(rt_task(p)) &&
            !task_running(rq, p) &&
            (p->prio >= rq->rt.highest_prio) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        /* don't touch RT tasks */
        return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        /* don't touch RT tasks */
        return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
        int weight = cpus_weight(*new_mask);

        BUG_ON(!rt_task(p));

        /*
         * Update the migration status of the RQ if we have an RT task
         * which is running AND changing its weight value.
         */
        if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);

                if ((p->nr_cpus_allowed <= 1) && (weight > 1))
                        rq->rt.rt_nr_migratory++;
                else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;
                }

                update_rt_migration(rq);
        }

        p->cpus_allowed    = *new_mask;
        p->nr_cpus_allowed = weight;
}
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)   do { } while (0)
# define schedule_balance_rt(rq, prev)  do { } while (0)
# define wakeup_balance_rt(rq, p)       do { } while (0)
#endif /* CONFIG_SMP */

static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of the queue if we are not the only
         * element on the queue:
         */
        if (p->run_list.prev != p->run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}

static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,
#endif /* CONFIG_SMP */

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
};