[SCSI] Block I/O while SG reset operation in progress - the midlayer patch
diff --git a/kernel/sched.c b/kernel/sched.c
index 2e8a146..f06d059 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -665,55 +665,13 @@ static int effective_prio(task_t *p)
 }
 
 /*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired, and switch periodically
- * regardless, to ensure that highly interactive tasks do not starve
- * the less fortunate for unreasonably long periods.
- */
-static inline int expired_starving(runqueue_t *rq)
-{
-       int limit;
-
-       /*
-        * Arrays were recently switched, all is well
-        */
-       if (!rq->expired_timestamp)
-               return 0;
-
-       limit = STARVATION_LIMIT * rq->nr_running;
-
-       /*
-        * It's time to switch arrays
-        */
-       if (jiffies - rq->expired_timestamp >= limit)
-               return 1;
-
-       /*
-        * There's a better selection in the expired array
-        */
-       if (rq->curr->static_prio > rq->best_expired_prio)
-               return 1;
-
-       /*
-        * All is well
-        */
-       return 0;
-}
-
-/*
  * __activate_task - move a task to the runqueue.
  */
 static void __activate_task(task_t *p, runqueue_t *rq)
 {
        prio_array_t *target = rq->active;
 
-       if (unlikely(batch_task(p) || expired_starving(rq)))
+       if (batch_task(p))
                target = rq->expired;
        enqueue_task(p, target);
        rq->nr_running++;
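
This first hunk drops the expired_starving() helper, so __activate_task() sends a freshly activated task to the expired array only when it is a SCHED_BATCH task; the starvation test itself reappears as a macro in the next hunk and is from then on consulted only from scheduler_tick(). For context, batch_task() is a small policy predicate defined earlier in kernel/sched.c and not shown in this diff; roughly, as a hedged sketch:

static inline int batch_task_sketch(task_t *p)
{
	/* Sketch only: the real batch_task() macro lives earlier in sched.c. */
	return p->policy == SCHED_BATCH;
}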
@@ -2532,6 +2490,22 @@ unsigned long long current_sched_time(const task_t *tsk)
 }
 
 /*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switched decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired:
+ */
+#define EXPIRED_STARVING(rq) \
+       ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
+               (jiffies - (rq)->expired_timestamp >= \
+                       STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
+                       ((rq)->curr->static_prio > (rq)->best_expired_prio))
+
+/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
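
Written out as a function, the EXPIRED_STARVING() macro above amounts to the following (illustrative sketch only, not part of the patch): force an array switch either when the first expired task has waited past a load-scaled deadline, or when a task with a better static priority is sitting in the expired array.

static inline int expired_starving_sketch(runqueue_t *rq)
{
	/* Deadline clause: the allowed wait scales with the number of runnable tasks. */
	if (STARVATION_LIMIT && rq->expired_timestamp &&
	    jiffies - rq->expired_timestamp >=
			STARVATION_LIMIT * rq->nr_running + 1)
		return 1;
	/* Priority clause: a better static_prio task has already expired. */
	return rq->curr->static_prio > rq->best_expired_prio;
}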
@@ -2666,7 +2640,7 @@ void scheduler_tick(void)
 
                if (!rq->expired_timestamp)
                        rq->expired_timestamp = jiffies;
-               if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
+               if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
                        enqueue_task(p, rq->expired);
                        if (p->static_prio < rq->best_expired_prio)
                                rq->best_expired_prio = p->static_prio;
@@ -3912,6 +3886,10 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
                        !capable(CAP_SYS_NICE))
                goto out_unlock;
 
+       retval = security_task_setscheduler(p, 0, NULL);
+       if (retval)
+               goto out_unlock;
+
        cpus_allowed = cpuset_cpus_allowed(p);
        cpus_and(new_mask, new_mask, cpus_allowed);
        retval = set_cpus_allowed(p, new_mask);
@@ -3980,7 +3958,10 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
        if (!p)
                goto out_unlock;
 
-       retval = 0;
+       retval = security_task_getscheduler(p);
+       if (retval)
+               goto out_unlock;
+
        cpus_and(*mask, p->cpus_allowed, cpu_online_map);
 
 out_unlock:
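
The two affinity hunks above add LSM coverage: sched_setaffinity() now calls security_task_setscheduler() before touching the cpumask, and sched_getaffinity() calls security_task_getscheduler() before reporting it, so a security module can refuse either operation. From user space such a refusal surfaces as an ordinary error return from the syscall; a minimal, hypothetical test program (not part of the patch):

#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);

	/* A denial from the LSM hook is reported like any other failure. */
	if (sched_setaffinity(0, sizeof(set), &set) == -1)
		fprintf(stderr, "sched_setaffinity: %s\n", strerror(errno));

	if (sched_getaffinity(0, sizeof(set), &set) == -1)
		fprintf(stderr, "sched_getaffinity: %s\n", strerror(errno));

	return 0;
}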
@@ -4072,6 +4053,9 @@ asmlinkage long sys_sched_yield(void)
 
 static inline void __cond_resched(void)
 {
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+       __might_sleep(__FILE__, __LINE__);
+#endif
        /*
         * The BKS might be reacquired before we have dropped
         * PREEMPT_ACTIVE, which could trigger a second
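
cond_resched() may sleep, so with CONFIG_DEBUG_SPINLOCK_SLEEP the __might_sleep() call added above reports callers that reach it from atomic context with the usual "sleeping function called from invalid context" warning. A hypothetical misuse it is meant to flag (sketch only, assuming a configuration that tracks atomic context, e.g. CONFIG_PREEMPT):

#include <linux/sched.h>
#include <linux/spinlock.h>

static void bad_pattern(spinlock_t *lock)
{
	spin_lock(lock);	/* atomic context from here on */
	cond_resched();		/* now reported by __might_sleep() */
	spin_unlock(lock);
}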
@@ -4168,7 +4152,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 
        atomic_inc(&rq->nr_iowait);
        schedule();
@@ -4179,7 +4163,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct runqueue *rq = &__raw_get_cpu_var(runqueues);
        long ret;
 
        atomic_inc(&rq->nr_iowait);
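
io_schedule() and io_schedule_timeout() only need a runqueue pointer to bump nr_iowait, and the saved rq is used for both the increment and the later decrement, so it need not be the CPU the task finally runs on; __raw_get_cpu_var() expresses "the local CPU's instance" directly instead of spelling out per_cpu(..., raw_smp_processor_id()). A hypothetical per-cpu counter using the same idiom (sketch; demo_iowait is not a real kernel symbol):

#include <linux/percpu.h>
#include <asm/atomic.h>

/* Hypothetical per-cpu counter, mirroring the rq->nr_iowait usage above. */
static DEFINE_PER_CPU(atomic_t, demo_iowait) = ATOMIC_INIT(0);

static void demo_enter_iowait(void)
{
	/* __raw_get_cpu_var(var) names the local CPU's copy without an
	 * explicit per_cpu(var, raw_smp_processor_id()) lookup. */
	atomic_inc(&__raw_get_cpu_var(demo_iowait));
}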
@@ -4772,6 +4756,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
+               if (!cpu_rq(cpu)->migration_thread)
+                       break;
                /* Unbind it from offline cpu so it can run.  Fall thru. */
                kthread_bind(cpu_rq(cpu)->migration_thread,
                             any_online_cpu(cpu_online_map));
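
The CPU_UP_CANCELED case runs when CPU_UP_PREPARE failed part-way, so the per-cpu migration thread may never have been created; the added check avoids handing a NULL pointer to kthread_bind(). A hypothetical notifier showing the same guard (sketch; the demo_* names are made up):

#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, demo_thread);

static int demo_cpu_callback(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
		/* UP_PREPARE may have failed before creating the thread. */
		if (!per_cpu(demo_thread, cpu))
			break;
		kthread_stop(per_cpu(demo_thread, cpu));
		per_cpu(demo_thread, cpu) = NULL;
		break;
	}
	return NOTIFY_OK;
}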
@@ -4814,7 +4800,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 /* Register at highest priority so that task migration (migrate_all_tasks)
  * happens before everything else.
  */
-static struct notifier_block __devinitdata migration_notifier = {
+static struct notifier_block migration_notifier = {
        .notifier_call = migration_call,
        .priority = 10
 };
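
Dropping __devinitdata keeps the notifier block resident: it is still consulted whenever a CPU comes up or goes down after boot, so it must not sit in a section that some configurations discard after init. A hypothetical registration following the same pattern (sketch; the demo_* names are made up):

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int demo_cpu_event(struct notifier_block *nfb, unsigned long action,
			  void *hcpu)
{
	return NOTIFY_OK;
}

/* Plain static data: must outlive boot-time init sections. */
static struct notifier_block demo_cpu_nb = {
	.notifier_call	= demo_cpu_event,
};

static int __init demo_register(void)
{
	return register_cpu_notifier(&demo_cpu_nb);
}
device_initcall(demo_register);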