	return &rt_rq->tg->rt_bandwidth;
}
-#else
+#else /* !CONFIG_RT_GROUP_SCHED */
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
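/*
 * With CONFIG_RT_GROUP_SCHED each task group carries its own rt_bandwidth;
 * without it, every rt_rq shares the single global def_rt_bandwidth.
 */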
#ifdef CONFIG_SMP
static int do_balance_runtime(struct rt_rq *rt_rq)

static void __enable_runtime(struct rq *rq)
{
-	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;
}
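/*
 * The root_domain local goes away above; __enable_runtime resets the
 * rt_rqs of the given rq directly, so rd is presumably dead code here.
 */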
-#else
+#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
-#endif
+#endif /* CONFIG_SMP */
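/*
 * On UP there is no other CPU to borrow runtime from, so balance_runtime
 * collapses to a stub that reports "nothing borrowed".
 */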
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			spin_unlock(&rt_rq->rt_runtime_lock);
-		}
+		} else if (rt_rq->rt_nr_running)
+			idle = 0;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
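/*
 * The new else-branch keeps idle at 0 when an unthrottled rt_rq still has
 * runnable tasks, so the period timer is not stopped while work is queued.
 */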
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se);
+
+	inc_cpu_load(rq, p->se.load.weight);
}
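/*
 * inc_cpu_load() adds the task's weight to rq->load, making enqueued RT
 * tasks visible in the per-cpu load accounting.
 */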
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);
+
+	dec_cpu_load(rq, p->se.load.weight);
}
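/* Symmetric to the enqueue path: remove the task's weight from rq->load. */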
/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct rt_prio_array *array = &rt_rq->active;
-	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	if (on_rt_rq(rt_se)) {
		list_del_init(&rt_se->run_list);
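/*
 * The queue local is dropped above; presumably it is recomputed inside the
 * on_rt_rq() branch, the only place it is needed.
 */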
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
-#endif
+#endif /* CONFIG_SCHED_DEBUG */
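/*
 * Tagging #else/#endif with the controlling symbol, as this patch does
 * throughout, is common kernel style for long conditional blocks.
 */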