X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Fsched_idletask.c;h=b133a28fcde32cbf20421f077686c14319871f74;hb=9b269d4034c7855ac34f0985cc55ee29bd80e80a;hp=f69e083e0d96e81f55d5b68b7b6d9cf348f807a2;hpb=f02231e51a280f1a0fee4d03ad8f50048e06cced;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index f69e083..b133a28 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -5,18 +5,25 @@
  *  handled in sched_fair.c)
  */
 
+#ifdef CONFIG_SMP
+static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
+{
+        return task_cpu(p); /* IDLE tasks as never migrated */
+}
+#endif /* CONFIG_SMP */
 /*
  * Idle tasks are unconditionally rescheduled:
  */
-static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 {
         resched_task(rq->idle);
 }
 
-static struct task_struct *pick_next_task_idle(struct rq *rq, u64 now)
+static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
         schedstat_inc(rq, sched_goidle);
-
+        /* adjust the active tasks as we might go into a long sleep */
+        calc_load_account_active(rq);
         return rq->idle;
 }
 
@@ -33,27 +40,73 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
         spin_lock_irq(&rq->lock);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, u64 now)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
 
+#ifdef CONFIG_SMP
 static unsigned long
 load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                  unsigned long max_nr_move, unsigned long max_load_move,
-                  struct sched_domain *sd, enum cpu_idle_type idle,
-                  int *all_pinned, int *this_best_prio)
+                  unsigned long max_load_move,
+                  struct sched_domain *sd, enum cpu_idle_type idle,
+                  int *all_pinned, int *this_best_prio)
+{
+        return 0;
+}
+
+static int
+move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
+                   struct sched_domain *sd, enum cpu_idle_type idle)
 {
         return 0;
 }
+#endif
+
+static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
+{
+}
+
+static void set_curr_task_idle(struct rq *rq)
+{
+}
 
-static void task_tick_idle(struct rq *rq, struct task_struct *curr)
+static void switched_to_idle(struct rq *rq, struct task_struct *p,
+                             int running)
 {
+        /* Can this actually happen?? */
+        if (running)
+                resched_task(rq->curr);
+        else
+                check_preempt_curr(rq, p, 0);
+}
+
+static void prio_changed_idle(struct rq *rq, struct task_struct *p,
+                              int oldprio, int running)
+{
+        /* This can happen for hot plug CPUS */
+
+        /*
+         * Reschedule if we are currently running on this runqueue and
+         * our priority decreased, or if we are not currently running on
+         * this runqueue and our priority is higher than the current's
+         */
+        if (running) {
+                if (p->prio > oldprio)
+                        resched_task(rq->curr);
+        } else
+                check_preempt_curr(rq, p, 0);
+}
+
+unsigned int get_rr_interval_idle(struct task_struct *task)
+{
+        return 0;
 }
 
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
-static struct sched_class idle_sched_class __read_mostly = {
+static const struct sched_class idle_sched_class = {
+        /* .next is NULL */
        /* no enqueue/yield_task for idle tasks */
 
        /* dequeue is not valid, we print a debug message there: */
@@ -64,8 +117,20 @@ static struct sched_class idle_sched_class __read_mostly = {
        .pick_next_task         = pick_next_task_idle,
        .put_prev_task          = put_prev_task_idle,
 
+#ifdef CONFIG_SMP
+       .select_task_rq         = select_task_rq_idle,
+
+       .load_balance           = load_balance_idle,
+       .move_one_task          = move_one_task_idle,
+#endif
+
        .set_curr_task          = set_curr_task_idle,
        .task_tick              = task_tick_idle,
 
+       .get_rr_interval        = get_rr_interval_idle,
+
+       .prio_changed           = prio_changed_idle,
+       .switched_to            = switched_to_idle,
+
        /* no .task_new for idle tasks */
 };
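
The reschedule decision added above in prio_changed_idle() and switched_to_idle() follows a common sched_class pattern: a task that is currently running only needs a reschedule when its own priority dropped, while a task that is not running should preempt only if it now outranks the current task. The stand-alone sketch below is not kernel code; the toy_* structs and helpers are simplified stand-ins for task_struct, rq, resched_task() and check_preempt_curr() (a plain priority comparison replaces the class-specific preemption check), and kernel convention is kept: a lower prio value means higher priority.

#include <stdio.h>

struct toy_task { int prio; };                    /* lower value = higher priority */
struct toy_rq   { struct toy_task *curr; int need_resched; };

static void toy_resched(struct toy_rq *rq)
{
        rq->need_resched = 1;                     /* ask the running task to yield */
}

static void toy_check_preempt(struct toy_rq *rq, struct toy_task *p)
{
        if (p->prio < rq->curr->prio)             /* p now outranks the current task */
                toy_resched(rq);
}

/* Mirrors the shape of prio_changed_idle(): 'running' means p is rq->curr. */
static void toy_prio_changed(struct toy_rq *rq, struct toy_task *p,
                             int oldprio, int running)
{
        if (running) {
                if (p->prio > oldprio)            /* our own priority decreased */
                        toy_resched(rq);
        } else
                toy_check_preempt(rq, p);         /* maybe preempt whoever runs now */
}

int main(void)
{
        struct toy_task idle  = { .prio = 140 };
        struct toy_task other = { .prio = 120 };
        struct toy_rq rq = { .curr = &idle, .need_resched = 0 };

        /* A waiting task gained a better priority than the one running. */
        toy_prio_changed(&rq, &other, 130, 0);
        printf("need_resched = %d\n", rq.need_resched);   /* prints 1 */
        return 0;
}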