X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=blobdiff_plain;f=kernel%2Fsched_idletask.c;h=5f93b570d3832cf47068e01942cdd8f36685a078;hp=5ebf829cdd736cf5883d4179c017a145b3020e52;hb=e071041be037eca208b62b84469a06bdfc692bea;hpb=29f59db3a74b0bdf78a1f5b53ef773caa82692dc

diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 5ebf829..5f93b57 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -5,10 +5,16 @@
  * handled in sched_fair.c)
  */
 
+#ifdef CONFIG_SMP
+static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
+{
+	return task_cpu(p); /* IDLE tasks are never migrated */
+}
+#endif /* CONFIG_SMP */
 /*
  * Idle tasks are unconditionally rescheduled:
  */
-static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p)
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 {
 	resched_task(rq->idle);
 }
@@ -16,7 +22,8 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
 	schedstat_inc(rq, sched_goidle);
-
+	/* adjust the active tasks as we might go into a long sleep */
+	calc_load_account_active(rq);
 	return rq->idle;
 }
 
@@ -27,26 +34,35 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
 static void dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
 {
-	spin_unlock_irq(&rq->lock);
+	raw_spin_unlock_irq(&rq->lock);
 	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
 	dump_stack();
-	spin_lock_irq(&rq->lock);
+	raw_spin_lock_irq(&rq->lock);
 }
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
 
+#ifdef CONFIG_SMP
 static unsigned long
 load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_nr_move, unsigned long max_load_move,
-		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned, int *this_best_prio)
+		  unsigned long max_load_move,
+		  struct sched_domain *sd, enum cpu_idle_type idle,
+		  int *all_pinned, int *this_best_prio)
 {
 	return 0;
 }
 
-static void task_tick_idle(struct rq *rq, struct task_struct *curr)
+static int
+move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle)
+{
+	return 0;
+}
+#endif
+
+static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
 
@@ -54,10 +70,43 @@ static void set_curr_task_idle(struct rq *rq)
 {
 }
 
+static void switched_to_idle(struct rq *rq, struct task_struct *p,
+			     int running)
+{
+	/* Can this actually happen?? */
+	if (running)
+		resched_task(rq->curr);
+	else
+		check_preempt_curr(rq, p, 0);
+}
+
+static void prio_changed_idle(struct rq *rq, struct task_struct *p,
+			      int oldprio, int running)
+{
+	/* This can happen for hot-plug CPUs */
+
+	/*
+	 * Reschedule if we are currently running on this runqueue and
+	 * our priority decreased, or if we are not currently running on
+	 * this runqueue and our priority is higher than the current's.
+	 */
+	if (running) {
+		if (p->prio > oldprio)
+			resched_task(rq->curr);
+	} else
+		check_preempt_curr(rq, p, 0);
+}
+
+unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
+{
+	return 0;
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
-static struct sched_class idle_sched_class __read_mostly = {
+static const struct sched_class idle_sched_class = {
+	/* .next is NULL */
 	/* no enqueue/yield_task for idle tasks */
 
 	/* dequeue is not valid, we print a debug message there: */
@@ -68,9 +117,20 @@ static struct sched_class idle_sched_class __read_mostly = {
 	.pick_next_task		= pick_next_task_idle,
 	.put_prev_task		= put_prev_task_idle,
 
+#ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_idle,
+	.load_balance		= load_balance_idle,
+	.move_one_task		= move_one_task_idle,
+#endif
 
 	.set_curr_task		= set_curr_task_idle,
 	.task_tick		= task_tick_idle,
+
+	.get_rr_interval	= get_rr_interval_idle,
+
+	.prio_changed		= prio_changed_idle,
+	.switched_to		= switched_to_idle,
+
 	/* no .task_new for idle tasks */
 };
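
Aside (not part of the patch): the hb side of this diff wires the idle policy into the scheduler as a const table of function hooks, where each idle hook either returns a fixed value or does nothing. The standalone userspace sketch below only mirrors that ops-table pattern under stated assumptions; toy_task, toy_rq, toy_sched_class and idle_like_class are hypothetical names invented for the illustration and are not kernel APIs.

/*
 * Standalone userspace sketch, not kernel code: it only mirrors the
 * "struct of hooks" pattern used by idle_sched_class in the diff above.
 * All type and function names here are made up for the example.
 */
#include <stdio.h>

struct toy_task { int cpu; };                 /* stand-in for task_struct */
struct toy_rq   { struct toy_task *curr; };   /* stand-in for struct rq   */

struct toy_sched_class {
	/* placement hook: which CPU should the task run on? */
	int (*select_rq)(struct toy_task *p);
	/* per-tick hook: the idle class has nothing to do here */
	void (*task_tick)(struct toy_rq *rq, struct toy_task *p, int queued);
	/* timeslice query: 0 means "no round-robin interval" */
	unsigned int (*get_rr_interval)(struct toy_rq *rq, struct toy_task *p);
};

/* Idle-like hooks: never migrate, no per-tick work, no timeslice. */
static int idle_select_rq(struct toy_task *p)
{
	return p->cpu;                        /* stay on the current CPU */
}

static void idle_task_tick(struct toy_rq *rq, struct toy_task *p, int queued)
{
	(void)rq; (void)p; (void)queued;      /* deliberately empty */
}

static unsigned int idle_get_rr_interval(struct toy_rq *rq, struct toy_task *p)
{
	(void)rq; (void)p;
	return 0;
}

static const struct toy_sched_class idle_like_class = {
	.select_rq       = idle_select_rq,
	.task_tick       = idle_task_tick,
	.get_rr_interval = idle_get_rr_interval,
};

int main(void)
{
	struct toy_task t  = { .cpu = 3 };
	struct toy_rq   rq = { .curr = &t };

	/* The core dispatches through the ops table, as sched.c does. */
	printf("idle-like task stays on CPU %d, rr interval %u\n",
	       idle_like_class.select_rq(&t),
	       idle_like_class.get_rr_interval(&rq, &t));
	idle_like_class.task_tick(&rq, &t, 0);
	return 0;
}

The const qualifier on idle_like_class plays the same role as the patch's change from "static struct sched_class ... __read_mostly" to "static const struct sched_class": the hook table is fixed at build time and never modified at run time.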