diff --git a/kernel/sched.c b/kernel/sched.c
index 561b3b3..cfa222a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
 #include <asm/irq_regs.h>
 
 /*
- * Scheduler clock - returns current time in nanosec units.
- * This is default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
-{
-       return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
-}
-
-/*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
  * and back.
@@ -321,7 +311,13 @@ static DEFINE_SPINLOCK(task_group_lock);
 # define INIT_TASK_GROUP_LOAD  NICE_0_LOAD
 #endif
 
+/*
+ * A weight of 0, 1 or ULONG_MAX can cause arithmetic problems.
+ * (The default weight is 1024 - so there's no practical
+ *  limitation from this.)
+ */
 #define MIN_SHARES     2
+#define MAX_SHARES     (ULONG_MAX - 1)
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 #endif
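
As a rough user-space illustration of the arithmetic problem the new comment describes (all values here are invented): multiplying an unclamped shares value by a summed task weight wraps around in unsigned long arithmetic, silently producing a small effective weight, while a shares value of 0 would zero the product outright.

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long shares = ULONG_MAX;	/* unclamped input */
		unsigned long weight = 2048;		/* e.g. two NICE_0_LOAD tasks */

		/* unsigned overflow: wraps instead of saturating */
		printf("%lu * %lu = %lu\n", shares, weight, shares * weight);
		return 0;
	}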
@@ -551,13 +547,7 @@ struct rq {
        unsigned long next_balance;
        struct mm_struct *prev_mm;
 
-       u64 clock, prev_clock_raw;
-       s64 clock_max_delta;
-
-       unsigned int clock_warps, clock_overflows, clock_underflows;
-       u64 idle_clock;
-       unsigned int clock_deep_idle_events;
-       u64 tick_timestamp;
+       u64 clock;
 
        atomic_t nr_iowait;
 
@@ -622,82 +612,6 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
-#ifdef CONFIG_NO_HZ
-static inline bool nohz_on(int cpu)
-{
-       return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
-}
-
-static inline u64 max_skipped_ticks(struct rq *rq)
-{
-       return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
-}
-
-static inline void update_last_tick_seen(struct rq *rq)
-{
-       rq->last_tick_seen = jiffies;
-}
-#else
-static inline u64 max_skipped_ticks(struct rq *rq)
-{
-       return 1;
-}
-
-static inline void update_last_tick_seen(struct rq *rq)
-{
-}
-#endif
-
-/*
- * Update the per-runqueue clock, as finegrained as the platform can give
- * us, but without assuming monotonicity, etc.:
- */
-static void __update_rq_clock(struct rq *rq)
-{
-       u64 prev_raw = rq->prev_clock_raw;
-       u64 now = sched_clock();
-       s64 delta = now - prev_raw;
-       u64 clock = rq->clock;
-
-#ifdef CONFIG_SCHED_DEBUG
-       WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-#endif
-       /*
-        * Protect against sched_clock() occasionally going backwards:
-        */
-       if (unlikely(delta < 0)) {
-               clock++;
-               rq->clock_warps++;
-       } else {
-               /*
-                * Catch too large forward jumps too:
-                */
-               u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
-               u64 max_time = rq->tick_timestamp + max_jump;
-
-               if (unlikely(clock + delta > max_time)) {
-                       if (clock < max_time)
-                               clock = max_time;
-                       else
-                               clock++;
-                       rq->clock_overflows++;
-               } else {
-                       if (unlikely(delta > rq->clock_max_delta))
-                               rq->clock_max_delta = delta;
-                       clock += delta;
-               }
-       }
-
-       rq->prev_clock_raw = now;
-       rq->clock = clock;
-}
-
-static void update_rq_clock(struct rq *rq)
-{
-       if (likely(smp_processor_id() == cpu_of(rq)))
-               __update_rq_clock(rq);
-}
-
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
@@ -713,6 +627,11 @@ static void update_rq_clock(struct rq *rq)
 #define task_rq(p)             cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
 
+static inline void update_rq_clock(struct rq *rq)
+{
+       rq->clock = sched_clock_cpu(cpu_of(rq));
+}
+
 /*
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
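
All of the warp/overflow bookkeeping above is replaced by this single delegation to sched_clock_cpu(). The call pattern relied on throughout the rest of the patch is (a condensed composite of the hunks below, not new code):

	spin_lock(&rq->lock);
	update_rq_clock(rq);	/* rq->clock = sched_clock_cpu(cpu_of(rq)) */
	/* ... use rq->clock while still holding the lock ... */
	spin_unlock(&rq->lock);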
@@ -904,11 +823,14 @@ static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
 static DEFINE_SPINLOCK(time_sync_lock);
 static unsigned long long prev_global_time;
 
-static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
+static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&time_sync_lock, flags);
+       /*
+        * We want this inlined, to not get tracer function calls
+        * in this critical section:
+        */
+       spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_);
+       __raw_spin_lock(&time_sync_lock.raw_lock);
 
        if (time < prev_global_time) {
                per_cpu(time_offset, cpu) += prev_global_time - time;
@@ -917,7 +839,8 @@ static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
                prev_global_time = time;
        }
 
-       spin_unlock_irqrestore(&time_sync_lock, flags);
+       __raw_spin_unlock(&time_sync_lock.raw_lock);
+       spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_);
 
        return time;
 }
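
The dropped irqsave/irqrestore pair has not simply vanished: cpu_clock() below now disables interrupts around the whole sequence, so this function can take the raw lock directly while keeping lockdep coverage through the explicit spin_acquire()/spin_release() annotations. The resulting nesting, condensed for illustration:

	local_irq_save(flags);					/* in cpu_clock() */
		__raw_spin_lock(&time_sync_lock.raw_lock);	/* here */
		/* ... compare against prev_global_time ... */
		__raw_spin_unlock(&time_sync_lock.raw_lock);
	local_irq_restore(flags);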
@@ -925,8 +848,6 @@ static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
 static unsigned long long __cpu_clock(int cpu)
 {
        unsigned long long now;
-       unsigned long flags;
-       struct rq *rq;
 
        /*
         * Only call sched_clock() if the scheduler has already been
@@ -935,11 +856,7 @@ static unsigned long long __cpu_clock(int cpu)
        if (unlikely(!scheduler_running))
                return 0;
 
-       local_irq_save(flags);
-       rq = cpu_rq(cpu);
-       update_rq_clock(rq);
-       now = rq->clock;
-       local_irq_restore(flags);
+       now = sched_clock_cpu(cpu);
 
        return now;
 }
@@ -951,13 +868,18 @@ static unsigned long long __cpu_clock(int cpu)
 unsigned long long cpu_clock(int cpu)
 {
        unsigned long long prev_cpu_time, time, delta_time;
+       unsigned long flags;
 
+       local_irq_save(flags);
        prev_cpu_time = per_cpu(prev_cpu_time, cpu);
        time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
        delta_time = time-prev_cpu_time;
 
-       if (unlikely(delta_time > time_sync_thresh))
+       if (unlikely(delta_time > time_sync_thresh)) {
                time = __sync_cpu_clock(time, cpu);
+               per_cpu(prev_cpu_time, cpu) = time;
+       }
+       local_irq_restore(flags);
 
        return time;
 }
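
Taken together with the __sync_cpu_clock() hunk, the common case is now a per-CPU clock read plus a cached offset, with interrupts disabled for the whole window; the global lock is taken only when the local value has drifted more than time_sync_thresh past the last synced reading. A condensed sketch (the lines hidden between the hunks above are not reproduced):

	local_irq_save(flags);
	time = sched_clock_cpu(cpu) + per_cpu(time_offset, cpu);
	if (time - per_cpu(prev_cpu_time, cpu) > time_sync_thresh) {
		time = __sync_cpu_clock(time, cpu);	/* slow path: global re-sync */
		per_cpu(prev_cpu_time, cpu) = time;	/* cache it so the slow path stays rare */
	}
	local_irq_restore(flags);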
@@ -1108,45 +1030,6 @@ static struct rq *this_rq_lock(void)
        return rq;
 }
 
-/*
- * We are going deep-idle (irqs are disabled):
- */
-void sched_clock_idle_sleep_event(void)
-{
-       struct rq *rq = cpu_rq(smp_processor_id());
-
-       WARN_ON(!irqs_disabled());
-       spin_lock(&rq->lock);
-       __update_rq_clock(rq);
-       spin_unlock(&rq->lock);
-       rq->clock_deep_idle_events++;
-}
-EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
-
-/*
- * We just idled delta nanoseconds (called with irqs disabled):
- */
-void sched_clock_idle_wakeup_event(u64 delta_ns)
-{
-       struct rq *rq = cpu_rq(smp_processor_id());
-       u64 now = sched_clock();
-
-       WARN_ON(!irqs_disabled());
-       rq->idle_clock += delta_ns;
-       /*
-        * Override the previous timestamp and ignore all
-        * sched_clock() deltas that occured while we idled,
-        * and use the PM-provided delta_ns to advance the
-        * rq clock:
-        */
-       spin_lock(&rq->lock);
-       rq->prev_clock_raw = now;
-       rq->clock += delta_ns;
-       spin_unlock(&rq->lock);
-       touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
-
 static void __resched_task(struct task_struct *p, int tif_bit);
 
 static inline void resched_task(struct task_struct *p)
@@ -1271,7 +1154,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
        spin_lock(&rq->lock);
-       __update_rq_clock(rq);
+       update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
        spin_unlock(&rq->lock);
 
@@ -1804,6 +1687,8 @@ __update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd,
 
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
+       else if (shares > MAX_SHARES)
+               shares = MAX_SHARES;
 
        __set_se_shares(tg->se[tcpu], shares);
 }
@@ -4462,19 +4347,11 @@ void scheduler_tick(void)
        int cpu = smp_processor_id();
        struct rq *rq = cpu_rq(cpu);
        struct task_struct *curr = rq->curr;
-       u64 next_tick = rq->tick_timestamp + TICK_NSEC;
+
+       sched_clock_tick();
 
        spin_lock(&rq->lock);
-       __update_rq_clock(rq);
-       /*
-        * Let rq->clock advance by at least TICK_NSEC:
-        */
-       if (unlikely(rq->clock < next_tick)) {
-               rq->clock = next_tick;
-               rq->clock_underflows++;
-       }
-       rq->tick_timestamp = rq->clock;
-       update_last_tick_seen(rq);
+       update_rq_clock(rq);
        update_cpu_load(rq);
        curr->sched_class->task_tick(rq, curr, 0);
        spin_unlock(&rq->lock);
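
sched_clock_tick() is deliberately called before rq->lock is taken, with interrupts still disabled by the timer interrupt. Its implementation lives outside this file; a rough sketch of the contract the tick path relies on (an assumption about the companion code, not part of this patch; resync_clock() is a hypothetical helper):

	void sched_clock_tick(void)
	{
		/*
		 * Once per tick, with irqs off: resynchronize this CPU's
		 * clock against a stable reference so that sched_clock_cpu()
		 * stays bounded between ticks.
		 */
		WARN_ON_ONCE(!irqs_disabled());
		resync_clock(smp_processor_id(), ktime_to_ns(ktime_get()));
	}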
@@ -4628,7 +4505,7 @@ need_resched_nonpreemptible:
         * Do the rq-clock update outside the rq lock:
         */
        local_irq_disable();
-       __update_rq_clock(rq);
+       update_rq_clock(rq);
        spin_lock(&rq->lock);
        clear_tsk_need_resched(prev);
 
@@ -4690,8 +4567,6 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
        struct thread_info *ti = current_thread_info();
-       struct task_struct *task = current;
-       int saved_lock_depth;
 
        /*
         * If there is a non-zero preempt_count or interrupts are disabled,
@@ -4702,16 +4577,7 @@ asmlinkage void __sched preempt_schedule(void)
 
        do {
                add_preempt_count(PREEMPT_ACTIVE);
-
-               /*
-                * We keep the big kernel semaphore locked, but we
-                * clear ->lock_depth so that schedule() doesnt
-                * auto-release the semaphore:
-                */
-               saved_lock_depth = task->lock_depth;
-               task->lock_depth = -1;
                schedule();
-               task->lock_depth = saved_lock_depth;
                sub_preempt_count(PREEMPT_ACTIVE);
 
                /*
@@ -4732,26 +4598,15 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
        struct thread_info *ti = current_thread_info();
-       struct task_struct *task = current;
-       int saved_lock_depth;
 
        /* Catch callers which need to be fixed */
        BUG_ON(ti->preempt_count || !irqs_disabled());
 
        do {
                add_preempt_count(PREEMPT_ACTIVE);
-
-               /*
-                * We keep the big kernel semaphore locked, but we
-                * clear ->lock_depth so that schedule() doesnt
-                * auto-release the semaphore:
-                */
-               saved_lock_depth = task->lock_depth;
-               task->lock_depth = -1;
                local_irq_enable();
                schedule();
                local_irq_disable();
-               task->lock_depth = saved_lock_depth;
                sub_preempt_count(PREEMPT_ACTIVE);
 
                /*
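
With the BKL depth juggling removed, both preempt_schedule() and preempt_schedule_irq() reduce to wrapping schedule() in PREEMPT_ACTIVE. That flag is what makes the recursion safe: schedule() treats a PREEMPT_ACTIVE entry as an involuntary preemption and skips dequeueing the task, roughly (paraphrasing the schedule() check of this era):

	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		/* the task genuinely blocked: remove it from the runqueue */
		deactivate_task(rq, prev, 1);
	}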
@@ -5670,7 +5525,6 @@ static void __cond_resched(void)
        } while (need_resched());
 }
 
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
 int __sched _cond_resched(void)
 {
        if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
@@ -5681,7 +5535,6 @@ int __sched _cond_resched(void)
        return 0;
 }
 EXPORT_SYMBOL(_cond_resched);
-#endif
 
 /*
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,
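
With the #if guard gone, _cond_resched() is now built for every preemption model instead of only the voluntary-preempt configurations. Typical use is unchanged; a minimal sketch with invented helpers:

	/* hypothetical long scan that must not hog the CPU */
	for (i = 0; i < nr_items; i++) {
		process_item(i);	/* invented */
		cond_resched();		/* voluntary reschedule point */
	}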
@@ -5976,8 +5829,11 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
+#if defined(CONFIG_PREEMPT)
+       task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
+#else
        task_thread_info(idle)->preempt_count = 0;
-
+#endif
        /*
         * The idle tasks have their own, simple scheduling class:
         */
@@ -8212,8 +8068,6 @@ void __init sched_init(void)
                spin_lock_init(&rq->lock);
                lockdep_set_class(&rq->lock, &rq->rq_lock_key);
                rq->nr_running = 0;
-               rq->clock = 1;
-               update_last_tick_seen(rq);
                init_cfs_rq(&rq->cfs, rq);
                init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -8357,6 +8211,7 @@ EXPORT_SYMBOL(__might_sleep);
 static void normalize_task(struct rq *rq, struct task_struct *p)
 {
        int on_rq;
+
        update_rq_clock(rq);
        on_rq = p->se.on_rq;
        if (on_rq)
@@ -8388,7 +8243,6 @@ void normalize_rt_tasks(void)
                p->se.sleep_start               = 0;
                p->se.block_start               = 0;
 #endif
-               task_rq(p)->clock               = 0;
 
                if (!rt_task(p)) {
                        /*
@@ -8785,13 +8639,10 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
        if (!tg->se[0])
                return -EINVAL;
 
-       /*
-        * A weight of 0 or 1 can cause arithmetics problems.
-        * (The default weight is 1024 - so there's no practical
-        *  limitation from this.)
-        */
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
+       else if (shares > MAX_SHARES)
+               shares = MAX_SHARES;
 
        mutex_lock(&shares_mutex);
        if (tg->shares == shares)
@@ -8816,7 +8667,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
                 * force a rebalance
                 */
                cfs_rq_set_shares(tg->cfs_rq[i], 0);
-               set_se_shares(tg->se[i], shares/nr_cpu_ids);
+               set_se_shares(tg->se[i], shares);
        }
 
        /*
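
Each per-CPU group entity now carries the full group weight; spreading the effective weight across CPUs is left to the rebalance mentioned in the comment, rather than the old static shares/nr_cpu_ids split. From user space the value arrives through the cpu cgroup's shares file; a hedged sketch, where the cgroup mount path is an assumption:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* write a shares value to an existing cpu cgroup */
	static int set_group_shares(const char *shares_file, const char *val)
	{
		int fd = open(shares_file, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, strlen(val)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	/* e.g. set_group_shares("/dev/cgroup/mygroup/cpu.shares", "2048"); */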
@@ -9135,7 +8986,7 @@ static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
+static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
                                s64 val)
 {
        return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
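
The ssize_t-to-int change matches the cftype ->write_s64 handler signature that this function is registered under; the wiring looks roughly like this (the read handler name and field layout are assumptions based on the surrounding code):

	static struct cftype cpu_files[] = {
		{
			.name		= "rt_runtime_us",
			.read_s64	= cpu_rt_runtime_read,
			.write_s64	= cpu_rt_runtime_write,
		},
	};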