sched: break out search for RT tasks
kernel/sched_rt.c
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

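/*
 * RT overload tracking (CONFIG_SMP): rt_overload_mask records which
 * runqueues hold more than one runnable RT task (and are thus
 * candidates to be pulled from), while rto_count caches how many such
 * runqueues exist so that the common "no overload anywhere" case is a
 * single atomic read.  Readers check rto_count before consulting the
 * mask; the wmb() in rt_set_overload() orders the two stores
 * accordingly.
 */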
#ifdef CONFIG_SMP
static cpumask_t rt_overload_mask;
static atomic_t rto_count;

static inline int rt_overloaded(void)
{
        return atomic_read(&rto_count);
}

static inline cpumask_t *rt_overload(void)
{
        return &rt_overload_mask;
}

static inline void rt_set_overload(struct rq *rq)
{
        cpu_set(rq->cpu, rt_overload_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        /* the order here really doesn't matter */
        atomic_dec(&rto_count);
        cpu_clear(rq->cpu, rt_overload_mask);
}

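/*
 * A runqueue counts as overloaded only when it has more than one
 * runnable RT task *and* at least one of them may run on another
 * CPU; a queue of pinned tasks has nothing to offer the push/pull
 * logic.
 */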
static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
                rt_set_overload(rq);
        else
                rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
}

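/*
 * Bookkeeping on every RT enqueue/dequeue: maintain rt_nr_running,
 * the cached highest priority of the queue and, on SMP, the count of
 * migratable tasks, refreshing the overload state whenever these
 * change.
 */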
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
        if (p->prio < rq->rt.highest_prio)
                rq->rt.highest_prio = p->prio;
        if (p->nr_cpus_allowed > 1)
                rq->rt.rt_nr_migratory++;

        update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        WARN_ON(!rq->rt.rt_nr_running);
        rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
        if (rq->rt.rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(p->prio < rq->rt.highest_prio);
                if (p->prio == rq->rt.highest_prio) {
                        /* recalculate */
                        array = &rq->rt.active;
                        rq->rt.highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rq->rt.highest_prio alone */
        } else
                rq->rt.highest_prio = MAX_RT_PRIO;
        if (p->nr_cpus_allowed > 1)
                rq->rt.rt_nr_migratory--;

        update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
        inc_cpu_load(rq, p->se.load.weight);

        inc_rt_tasks(p, rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
        dec_cpu_load(rq, p->se.load.weight);

        dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}

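/*
 * sched_yield() for RT tasks: rotate the current task to the tail of
 * its own priority queue.
 */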
static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
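/*
 * No clever wakeup-CPU selection is done here yet: leave the task on
 * the CPU it last ran on and let the push/pull logic below migrate
 * it if necessary.
 */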
static int select_task_rq_rt(struct task_struct *p, int sync)
{
        return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

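/*
 * O(1) pick: take the first set bit in the priority bitmap and run
 * the task at the head of that queue.
 */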
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

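/*
 * A task is a candidate for migration if it is not currently
 * running, is allowed on the destination CPU (cpu == -1 means "any
 * CPU"), and is not pinned to a single CPU.
 */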
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
            (p->nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second-highest pullable RT task, or NULL if there is none */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
                                                     int cpu)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        assert_spin_locked(&rq->lock);

        if (likely(rq->rt.rt_nr_running < 2))
                return NULL;

        idx = sched_find_first_bit(array->bitmap);
        if (unlikely(idx >= MAX_RT_PRIO)) {
                WARN_ON(1); /* rt_nr_running is bad */
                return NULL;
        }

        queue = array->queue + idx;
        BUG_ON(list_empty(queue));

        next = list_entry(queue->next, struct task_struct, run_list);
        if (unlikely(pick_rt_task(rq, next, cpu)))
                goto out;

        if (queue->next->next != queue) {
                /* same prio task */
                next = list_entry(queue->next->next,
                                  struct task_struct, run_list);
                if (pick_rt_task(rq, next, cpu))
                        goto out;
        }

 retry:
        /* slower, but more flexible */
        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
        if (unlikely(idx >= MAX_RT_PRIO))
                return NULL;

        queue = array->queue + idx;
        BUG_ON(list_empty(queue));

        list_for_each_entry(next, queue, run_list) {
                if (pick_rt_task(rq, next, cpu))
                        goto out;
        }

        goto retry;

 out:
        return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

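/*
 * Scan every online CPU the task is allowed on and return the one
 * whose runqueue runs at the lowest RT priority (ideally one running
 * no RT task at all), or -1 if no runqueue beats the task's own
 * priority.
 */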
static int find_lowest_rq(struct task_struct *task)
{
        int cpu;
        cpumask_t *cpu_mask = &__get_cpu_var(local_cpu_mask);
        struct rq *lowest_rq = NULL;

        cpus_and(*cpu_mask, cpu_online_map, task->cpus_allowed);

        /*
         * Scan each rq for the lowest prio.
         */
        for_each_cpu_mask(cpu, *cpu_mask) {
                struct rq *rq = cpu_rq(cpu);

                /* skip the CPU the task is already on */
                if (cpu == task_cpu(task))
                        continue;

                /* We look for lowest RT prio or non-RT CPU */
                if (rq->rt.highest_prio >= MAX_RT_PRIO) {
                        lowest_rq = rq;
                        break;
                }

                /* no locking for now */
                if (rq->rt.highest_prio > task->prio &&
                    (!lowest_rq || rq->rt.highest_prio > lowest_rq->rt.highest_prio)) {
                        lowest_rq = rq;
                }
        }

        return lowest_rq ? lowest_rq->cpu : -1;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
                                      struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int cpu;
        int tries;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if (cpu == -1)
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In the
                         * meantime, the task could have migrated
                         * already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
                                     task_running(rq, task) ||
                                     !task->se.on_rq)) {
                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio > task->prio)
                        break;

                /* try again */
                spin_unlock(&lowest_rq->lock);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the
 * non-running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;
        int paranoid = RT_MAX_TRIES;

        assert_spin_locked(&rq->lock);

        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;

 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that the next_task slipped in with a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock,
                 * so it is possible that next_task has changed.
                 * If it has, then try again.
                 */
                task = pick_next_highest_task_rt(rq, -1);
                if (unlikely(task != next_task) && task && paranoid--) {
                        put_task_struct(next_task);
                        next_task = task;
                        goto retry;
                }
                goto out;
        }

        assert_spin_locked(&lowest_rq->lock);

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);

        resched_task(lowest_rq->curr);

        spin_unlock(&lowest_rq->lock);

        ret = 1;
 out:
        put_task_struct(next_task);

        return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task() will return true if it moved an RT task */
        while (push_rt_task(rq))
                ;
}

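/*
 * The mirror image of push: when this runqueue is about to drop in
 * priority, look at every overloaded runqueue and pull over any
 * queued RT task that would preempt whatever we would otherwise run.
 * Returns nonzero if a task was pulled or if this_rq's next task
 * changed while locks were dropped.
 */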
static int pull_rt_task(struct rq *this_rq)
{
        struct task_struct *next;
        struct task_struct *p;
        struct rq *src_rq;
        cpumask_t *rto_cpumask;
        int this_cpu = this_rq->cpu;
        int cpu;
        int ret = 0;

        assert_spin_locked(&this_rq->lock);

        /*
         * If cpusets are used, and we have overlapping
         * run queue cpusets, then this algorithm may not catch all
         * overloaded runqueues. This is just the price you pay for
         * trying to keep cache dirtying down on large SMP machines.
         */
        if (likely(!rt_overloaded()))
                return 0;

        next = pick_next_task_rt(this_rq);

        rto_cpumask = rt_overload();

        for_each_cpu_mask(cpu, *rto_cpumask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);
                if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
                        /*
                         * It is possible that overlapping cpusets
                         * will miss clearing a non-overloaded runqueue.
                         * Clear it now.
                         */
                        if (double_lock_balance(this_rq, src_rq)) {
                                /* unlocked our runqueue lock */
                                struct task_struct *old_next = next;
                                next = pick_next_task_rt(this_rq);
                                if (next != old_next)
                                        ret = 1;
                        }
                        if (likely(src_rq->rt.rt_nr_running <= 1))
                                /*
                                 * Small chance that this_rq->curr changed
                                 * but it's really harmless here.
                                 */
                                rt_clear_overload(src_rq);
                        else
                                /*
                                 * Heh, the src_rq is now overloaded; since
                                 * we already have the src_rq lock, go straight
                                 * to pulling tasks from it.
                                 */
                                goto try_pulling;
                        spin_unlock(&src_rq->lock);
                        continue;
                }

                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * steal our next task - hence we must cause
                 * the caller to recalculate the next task
                 * in that case:
                 */
                if (double_lock_balance(this_rq, src_rq)) {
                        struct task_struct *old_next = next;
                        next = pick_next_task_rt(this_rq);
                        if (next != old_next)
                                ret = 1;
                }

                /*
                 * Are there still pullable RT tasks?
                 */
                if (src_rq->rt.rt_nr_running <= 1) {
                        spin_unlock(&src_rq->lock);
                        continue;
                }

 try_pulling:
                p = pick_next_highest_task_rt(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (!next || (p->prio < next->prio))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);

                        /*
                         * There's a chance that p is higher in priority
                         * than what's currently running on its CPU.
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
                         * current task on the run queue, or if
                         * this_rq's next task is lower in prio than
                         * the current task on that rq.
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))
                                goto bail;

                        ret = 1;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         */

                        /*
                         * Update next so that we won't pick a task
                         * on another cpu with a priority lower (or equal)
                         * than the one we just picked.
                         */
                        next = p;
                }
 bail:
                spin_unlock(&src_rq->lock);
        }

        return ret;
}

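/*
 * If prev is an RT task whose departure leaves this runqueue at a
 * lower priority, try to pull a higher-priority RT task over from an
 * overloaded runqueue before something of lower priority gets to run.
 */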
static void schedule_balance_rt(struct rq *rq,
                                struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) &&
            rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
}

static void schedule_tail_balance_rt(struct rq *rq)
{
        /*
         * If we have more than one rt_task queued, then
         * see if we can push the other rt_tasks off to other CPUs.
         * Note we may release the rq lock, and since
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
        if (unlikely(rq->rt.rt_nr_running > 1)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
        }
}

static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
        if (unlikely(rt_task(p)) &&
            !task_running(rq, p) &&
            (p->prio >= rq->curr->prio))
                push_rt_tasks(rq);
}

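/*
 * RT tasks are migrated exclusively by the push/pull logic above, so
 * the generic load-balancer callbacks are deliberate no-ops here.
 */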
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        /* don't touch RT tasks */
        return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        /* don't touch RT tasks */
        return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
        int weight = cpus_weight(*new_mask);

        BUG_ON(!rt_task(p));

        /*
         * Update the migration status of the rq if we have an RT task
         * which is queued AND whose number of allowed CPUs is changing.
         */
        if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);

                if ((p->nr_cpus_allowed <= 1) && (weight > 1))
                        rq->rt.rt_nr_migratory++;
                else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;
                }

                update_rt_migration(rq);
        }

        p->cpus_allowed    = *new_mask;
        p->nr_cpus_allowed = weight;
}
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)   do { } while (0)
# define schedule_balance_rt(rq, prev)  do { } while (0)
# define wakeup_balance_rt(rq, p)       do { } while (0)
#endif /* CONFIG_SMP */

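/*
 * Per-tick bookkeeping: update runtime statistics and, for SCHED_RR
 * tasks, rotate the task to the tail of its priority queue once its
 * timeslice expires.
 */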
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->run_list.prev != p->run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}

static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,
#endif /* CONFIG_SMP */

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
};