/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;
	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);
}

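/*
 * Bookkeeping helpers: keep rq->rt.rt_nr_running in sync as RT tasks
 * are added to and removed from the runqueue.
 */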
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
	rq->rt.rt_nr_running++;
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rq->rt.rt_nr_running);
	rq->rt.rt_nr_running--;
}

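/*
 * Queue a task at the tail of the list for its priority and mark that
 * priority as populated in the bitmap.
 */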
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	inc_cpu_load(rq, p->se.load.weight);

	inc_rt_tasks(p, rq);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
	dec_cpu_load(rq, p->se.load.weight);
	dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

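/*
 * sched_yield() for an RT task: rotate the current task to the tail of
 * its own priority queue; lower-priority tasks still do not get to run.
 */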
static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

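/*
 * Pick the next RT task in O(1): find the first set bit in the priority
 * bitmap, then take the task at the head of that priority's queue.
 */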
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;
	return next;
}

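/*
 * The previous task is leaving the CPU: fold the runtime it has just
 * consumed into its statistics and clear its exec_start stamp.
 */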
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	head = array->queue + idx;
	curr = head->prev;
	p = list_entry(curr, struct task_struct, run_list);
	curr = curr->prev;

	rq->rt.rt_load_balance_idx = idx;
	rq->rt.rt_load_balance_head = head;
	rq->rt.rt_load_balance_curr = curr;
	return p;
}

static struct task_struct *load_balance_next_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = rq->rt.rt_load_balance_idx;
	head = rq->rt.rt_load_balance_head;
	curr = rq->rt.rt_load_balance_curr;

	/*
	 * If we arrived back to the head again then
	 * iterate to the next queue (if any):
	 */
	if (unlikely(head == curr)) {
		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

		if (next_idx >= MAX_RT_PRIO)
			return NULL;

		idx = next_idx;
		head = array->queue + idx;
		curr = head->prev;

		rq->rt.rt_load_balance_idx = idx;
		rq->rt.rt_load_balance_head = head;
	}

	p = list_entry(curr, struct task_struct, run_list);
	curr = curr->prev;
	rq->rt.rt_load_balance_curr = curr;
	return p;
}

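/*
 * Pull RT tasks from 'busiest' towards this runqueue. The generic
 * balance_tasks() walker does the actual moving; we only supply the
 * iterator callbacks defined above.
 */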
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	struct rq_iterator rt_rq_iterator;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	/*
	 * Pass 'busiest' rq argument into
	 * load_balance_[start|next]_rt iterators.
	 */
	rt_rq_iterator.arg = busiest;

	return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
			idle, all_pinned, this_best_prio, &rt_rq_iterator);
}

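/*
 * As above, but move at most one task, via the generic
 * iter_move_one_task() helper.
 */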
static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct rq_iterator rt_rq_iterator;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	rt_rq_iterator.arg = busiest;

	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
			&rt_rq_iterator);
}

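/*
 * Per-tick housekeeping for a running RT task; only SCHED_RR tasks
 * need timeslice handling here.
 */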
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;
	if (--p->time_slice)
		return;

	p->time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

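/*
 * Called when the currently running task starts being handled by this
 * class (e.g. after a policy change); restart its exec_start window so
 * runtime accounting begins from now.
 */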
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

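/*
 * Method table wiring the RT policies into the core scheduler; the
 * fair class is next in line, so CFS tasks run only when no RT task
 * is runnable.
 */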
const struct sched_class rt_sched_class = {
	.next = &fair_sched_class,
	.enqueue_task = enqueue_task_rt,
	.dequeue_task = dequeue_task_rt,
	.yield_task = yield_task_rt,

	.check_preempt_curr = check_preempt_curr_rt,

	.pick_next_task = pick_next_task_rt,
	.put_prev_task = put_prev_task_rt,

	.load_balance = load_balance_rt,
	.move_one_task = move_one_task_rt,

	.set_curr_task = set_curr_task_rt,
	.task_tick = task_tick_rt,
};