diff --git a/kernel/sched.c b/kernel/sched.c
index bc68037..18cceee 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array {
 
 struct rt_bandwidth {
        /* nests inside the rq lock: */
-       spinlock_t              rt_runtime_lock;
+       raw_spinlock_t          rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;
 
-       spin_lock_init(&rt_b->rt_runtime_lock);
+       raw_spin_lock_init(&rt_b->rt_runtime_lock);
 
        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
        if (hrtimer_active(&rt_b->rt_period_timer))
                return;
 
-       spin_lock(&rt_b->rt_runtime_lock);
+       raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                unsigned long delta;
                ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
                __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
-       spin_unlock(&rt_b->rt_runtime_lock);
+       raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
@@ -470,7 +470,7 @@ struct rt_rq {
        u64 rt_time;
        u64 rt_runtime;
        /* Nests inside the rq lock: */
-       spinlock_t rt_runtime_lock;
+       raw_spinlock_t rt_runtime_lock;
 
 #ifdef CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_boosted;
@@ -525,7 +525,7 @@ static struct root_domain def_root_domain;
  */
 struct rq {
        /* runqueue lock: */
-       spinlock_t lock;
+       raw_spinlock_t lock;
 
        /*
         * nr_running and cpu_load should be in the same cacheline because
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq)
  */
 int runqueue_is_locked(int cpu)
 {
-       return spin_is_locked(&cpu_rq(cpu)->lock);
+       return raw_spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
@@ -893,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
 
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -917,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
        next->oncpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
 #else
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 #endif
 }
 
@@ -949,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 {
        for (;;) {
                struct rq *rq = task_rq(p);
-               spin_lock(&rq->lock);
+               raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
-               spin_unlock(&rq->lock);
+               raw_spin_unlock(&rq->lock);
        }
 }
 
@@ -969,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        for (;;) {
                local_irq_save(*flags);
                rq = task_rq(p);
-               spin_lock(&rq->lock);
+               raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
-               spin_unlock_irqrestore(&rq->lock, *flags);
+               raw_spin_unlock_irqrestore(&rq->lock, *flags);
        }
 }
 
@@ -981,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p)
        struct rq *rq = task_rq(p);
 
        smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-       spin_unlock_wait(&rq->lock);
+       raw_spin_unlock_wait(&rq->lock);
 }
 
 static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
 {
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 }
 
 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
 {
-       spin_unlock_irqrestore(&rq->lock, *flags);
+       raw_spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
 /*
@@ -1006,7 +1006,7 @@ static struct rq *this_rq_lock(void)
 
        local_irq_disable();
        rq = this_rq();
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
 
        return rq;
 }
@@ -1053,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 
        return HRTIMER_NORESTART;
 }
@@ -1069,10 +1069,10 @@ static void __hrtick_start(void *arg)
 {
        struct rq *rq = arg;
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        hrtimer_restart(&rq->hrtick_timer);
        rq->hrtick_csd_pending = 0;
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 }
 
 /*
@@ -1179,7 +1179,7 @@ static void resched_task(struct task_struct *p)
 {
        int cpu;
 
-       assert_spin_locked(&task_rq(p)->lock);
+       assert_raw_spin_locked(&task_rq(p)->lock);
 
        if (test_tsk_need_resched(p))
                return;
@@ -1201,10 +1201,10 @@ static void resched_cpu(int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       if (!spin_trylock_irqsave(&rq->lock, flags))
+       if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -1273,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 #else /* !CONFIG_SMP */
 static void resched_task(struct task_struct *p)
 {
-       assert_spin_locked(&task_rq(p)->lock);
+       assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
 }
 
@@ -1600,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
                struct rq *rq = cpu_rq(cpu);
                unsigned long flags;
 
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
                tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
                __set_se_shares(tg->se[cpu], shares);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
 }
 
@@ -1706,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
        if (root_task_group_empty())
                return;
 
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
        update_shares(sd);
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
 }
 
 static void update_h_load(long cpu)
@@ -1748,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
 {
-       spin_unlock(&this_rq->lock);
+       raw_spin_unlock(&this_rq->lock);
        double_rq_lock(this_rq, busiest);
 
        return 1;
@@ -1769,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
        int ret = 0;
 
-       if (unlikely(!spin_trylock(&busiest->lock))) {
+       if (unlikely(!raw_spin_trylock(&busiest->lock))) {
                if (busiest < this_rq) {
-                       spin_unlock(&this_rq->lock);
-                       spin_lock(&busiest->lock);
-                       spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_unlock(&this_rq->lock);
+                       raw_spin_lock(&busiest->lock);
+                       raw_spin_lock_nested(&this_rq->lock,
+                                             SINGLE_DEPTH_NESTING);
                        ret = 1;
                } else
-                       spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock_nested(&busiest->lock,
+                                             SINGLE_DEPTH_NESTING);
        }
        return ret;
 }
@@ -1790,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
        if (unlikely(!irqs_disabled())) {
                /* printk() doesn't work good under rq->lock */
-               spin_unlock(&this_rq->lock);
+               raw_spin_unlock(&this_rq->lock);
                BUG_ON(1);
        }
 
@@ -1800,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(busiest->lock)
 {
-       spin_unlock(&busiest->lock);
+       raw_spin_unlock(&busiest->lock);
        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
 #endif
@@ -2023,13 +2025,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
                return;
        }
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
        update_rq_clock(rq);
        set_task_cpu(p, cpu);
        p->cpus_allowed = cpumask_of_cpu(cpu);
        p->rt.nr_cpus_allowed = 1;
        p->flags |= PF_THREAD_BOUND;
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -2781,10 +2783,10 @@ static inline void post_schedule(struct rq *rq)
        if (rq->post_schedule) {
                unsigned long flags;
 
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->curr->sched_class->post_schedule)
                        rq->curr->sched_class->post_schedule(rq);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
 
                rq->post_schedule = 0;
        }
@@ -3066,15 +3068,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 {
        BUG_ON(!irqs_disabled());
        if (rq1 == rq2) {
-               spin_lock(&rq1->lock);
+               raw_spin_lock(&rq1->lock);
                __acquire(rq2->lock);   /* Fake it out ;) */
        } else {
                if (rq1 < rq2) {
-                       spin_lock(&rq1->lock);
-                       spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock(&rq1->lock);
+                       raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
                } else {
-                       spin_lock(&rq2->lock);
-                       spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock(&rq2->lock);
+                       raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
                }
        }
        update_rq_clock(rq1);
@@ -3091,9 +3093,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
        __releases(rq1->lock)
        __releases(rq2->lock)
 {
-       spin_unlock(&rq1->lock);
+       raw_spin_unlock(&rq1->lock);
        if (rq1 != rq2)
-               spin_unlock(&rq2->lock);
+               raw_spin_unlock(&rq2->lock);
        else
                __release(rq2->lock);
 }
@@ -4186,14 +4188,15 @@ redo:
 
                if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-                       spin_lock_irqsave(&busiest->lock, flags);
+                       raw_spin_lock_irqsave(&busiest->lock, flags);
 
                        /* don't kick the migration_thread, if the curr
                         * task on busiest cpu can't be moved to this_cpu
                         */
                        if (!cpumask_test_cpu(this_cpu,
                                              &busiest->curr->cpus_allowed)) {
-                               spin_unlock_irqrestore(&busiest->lock, flags);
+                               raw_spin_unlock_irqrestore(&busiest->lock,
+                                                           flags);
                                all_pinned = 1;
                                goto out_one_pinned;
                        }
@@ -4203,7 +4206,7 @@ redo:
                                busiest->push_cpu = this_cpu;
                                active_balance = 1;
                        }
-                       spin_unlock_irqrestore(&busiest->lock, flags);
+                       raw_spin_unlock_irqrestore(&busiest->lock, flags);
                        if (active_balance)
                                wake_up_process(busiest->migration_thread);
 
@@ -4385,10 +4388,10 @@ redo:
                /*
                 * Should not call ttwu while holding a rq->lock
                 */
-               spin_unlock(&this_rq->lock);
+               raw_spin_unlock(&this_rq->lock);
                if (active_balance)
                        wake_up_process(busiest->migration_thread);
-               spin_lock(&this_rq->lock);
+               raw_spin_lock(&this_rq->lock);
 
        } else
                sd->nr_balance_failed = 0;
@@ -5257,11 +5260,11 @@ void scheduler_tick(void)
 
        sched_clock_tick();
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        update_cpu_load(rq);
        curr->sched_class->task_tick(rq, curr, 0);
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 
        perf_event_task_tick(curr, cpu);
 
@@ -5455,7 +5458,7 @@ need_resched_nonpreemptible:
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 
-       spin_lock_irq(&rq->lock);
+       raw_spin_lock_irq(&rq->lock);
        update_rq_clock(rq);
        clear_tsk_need_resched(prev);
 
@@ -5491,7 +5494,7 @@ need_resched_nonpreemptible:
                cpu = smp_processor_id();
                rq = cpu_rq(cpu);
        } else
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
 
        post_schedule(rq);
 
@@ -6320,7 +6323,7 @@ recheck:
         * make sure no PI-waiters arrive (or leave) while we are
         * changing the priority of the task:
         */
-       spin_lock_irqsave(&p->pi_lock, flags);
+       raw_spin_lock_irqsave(&p->pi_lock, flags);
        /*
         * To be able to change p->policy safely, the apropriate
         * runqueue lock must be held.
@@ -6330,7 +6333,7 @@ recheck:
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
                __task_rq_unlock(rq);
-               spin_unlock_irqrestore(&p->pi_lock, flags);
+               raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                goto recheck;
        }
        update_rq_clock(rq);
@@ -6354,7 +6357,7 @@ recheck:
                check_class_changed(rq, p, prev_class, oldprio, running);
        }
        __task_rq_unlock(rq);
-       spin_unlock_irqrestore(&p->pi_lock, flags);
+       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
        rt_mutex_adjust_pi(p);
 
@@ -6684,7 +6687,7 @@ SYSCALL_DEFINE0(sched_yield)
         */
        __release(rq->lock);
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-       _raw_spin_unlock(&rq->lock);
+       do_raw_spin_unlock(&rq->lock);
        preempt_enable_no_resched();
 
        schedule();
@@ -6980,7 +6983,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        __sched_fork(idle);
        idle->se.exec_start = sched_clock();
@@ -6992,7 +6995,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
        idle->oncpu = 1;
 #endif
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
 #if defined(CONFIG_PREEMPT)
@@ -7209,10 +7212,10 @@ static int migration_thread(void *data)
                struct migration_req *req;
                struct list_head *head;
 
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
 
                if (cpu_is_offline(cpu)) {
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        break;
                }
 
@@ -7224,7 +7227,7 @@ static int migration_thread(void *data)
                head = &rq->migration_queue;
 
                if (list_empty(head)) {
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        schedule();
                        set_current_state(TASK_INTERRUPTIBLE);
                        continue;
@@ -7233,14 +7236,14 @@ static int migration_thread(void *data)
                list_del_init(head->next);
 
                if (req->task != NULL) {
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                        __migrate_task(req->task, cpu, req->dest_cpu);
                } else if (likely(cpu == (badcpu = smp_processor_id()))) {
                        req->dest_cpu = RCU_MIGRATION_GOT_QS;
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                } else {
                        req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                        WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
                }
                local_irq_enable();
@@ -7363,14 +7366,14 @@ void sched_idle_next(void)
         * Strictly not necessary since rest of the CPUs are stopped by now
         * and interrupts disabled on the current cpu.
         */
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
        update_rq_clock(rq);
        activate_task(rq, p, 0);
 
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 /*
@@ -7406,9 +7409,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
         * that's OK. No task can be added to this CPU, so iteration is
         * fine.
         */
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
        move_task_off_dead_cpu(dead_cpu, p);
-       spin_lock_irq(&rq->lock);
+       raw_spin_lock_irq(&rq->lock);
 
        put_task_struct(p);
 }
@@ -7449,17 +7452,16 @@ static struct ctl_table sd_ctl_dir[] = {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
-       {0, },
+       {}
 };
 
 static struct ctl_table sd_ctl_root[] = {
        {
-               .ctl_name       = CTL_KERN,
                .procname       = "kernel",
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
-       {0, },
+       {}
 };
 
 static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -7675,13 +7677,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
                /* Update our root-domain */
                rq = cpu_rq(cpu);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
                        set_rq_online(rq);
                }
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -7706,13 +7708,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                put_task_struct(rq->migration_thread);
                rq->migration_thread = NULL;
                /* Idle task back to normal (off runqueue, low prio) */
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
                update_rq_clock(rq);
                deactivate_task(rq, rq->idle, 0);
                __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
                rq->idle->sched_class = &idle_sched_class;
                migrate_dead_tasks(cpu);
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
                cpuset_unlock();
                migrate_nr_uninterruptible(rq);
                BUG_ON(rq->nr_running != 0);
@@ -7722,30 +7724,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 * they didn't take sched_hotcpu_mutex. Just wake up
                 * the requestors.
                 */
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
                while (!list_empty(&rq->migration_queue)) {
                        struct migration_req *req;
 
                        req = list_entry(rq->migration_queue.next,
                                         struct migration_req, list);
                        list_del_init(&req->list);
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        complete(&req->done);
-                       spin_lock_irq(&rq->lock);
+                       raw_spin_lock_irq(&rq->lock);
                }
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
                break;
 
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                /* Update our root-domain */
                rq = cpu_rq(cpu);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                        set_rq_offline(rq);
                }
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                break;
 #endif
        }
@@ -7975,7 +7977,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
        struct root_domain *old_rd = NULL;
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        if (rq->rd) {
                old_rd = rq->rd;
@@ -8001,7 +8003,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
        if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
                set_rq_online(rq);
 
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        if (old_rd)
                free_rootdomain(old_rd);
@@ -8287,14 +8289,14 @@ enum s_alloc {
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
 
 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
                 struct sched_group **sg, struct cpumask *unused)
 {
        if (sg)
-               *sg = &per_cpu(sched_group_cpus, cpu).sg;
+               *sg = &per_cpu(sched_groups, cpu).sg;
        return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */
@@ -9358,13 +9360,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
-       plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+       plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
 #endif
 
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
-       spin_lock_init(&rt_rq->rt_runtime_lock);
+       raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 
 #ifdef CONFIG_RT_GROUP_SCHED
        rt_rq->rt_nr_boosted = 0;
@@ -9524,7 +9526,7 @@ void __init sched_init(void)
                struct rq *rq;
 
                rq = cpu_rq(i);
-               spin_lock_init(&rq->lock);
+               raw_spin_lock_init(&rq->lock);
                rq->nr_running = 0;
                rq->calc_load_active = 0;
                rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9584,7 +9586,7 @@ void __init sched_init(void)
 #elif defined CONFIG_USER_SCHED
                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
                init_tg_rt_entry(&init_task_group,
-                               &per_cpu(init_rt_rq, i),
+                               &per_cpu(init_rt_rq_var, i),
                                &per_cpu(init_sched_rt_entity, i), i, 1,
                                root_task_group.rt_se[i]);
 #endif
@@ -9622,7 +9624,7 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
-       plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+       plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
 #endif
 
        /*
@@ -9747,13 +9749,13 @@ void normalize_rt_tasks(void)
                        continue;
                }
 
-               spin_lock(&p->pi_lock);
+               raw_spin_lock(&p->pi_lock);
                rq = __task_rq_lock(p);
 
                normalize_task(rq, p);
 
                __task_rq_unlock(rq);
-               spin_unlock(&p->pi_lock);
+               raw_spin_unlock(&p->pi_lock);
        } while_each_thread(g, p);
 
        read_unlock_irqrestore(&tasklist_lock, flags);
@@ -10116,9 +10118,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
        struct rq *rq = cfs_rq->rq;
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
        __set_se_shares(se, shares);
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static DEFINE_MUTEX(shares_mutex);
@@ -10303,18 +10305,18 @@ static int tg_set_bandwidth(struct task_group *tg,
        if (err)
                goto unlock;
 
-       spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+       raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
        tg->rt_bandwidth.rt_runtime = rt_runtime;
 
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = tg->rt_rq[i];
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_runtime;
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
-       spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+       raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
  unlock:
        read_unlock(&tasklist_lock);
        mutex_unlock(&rt_constraints_mutex);
@@ -10419,15 +10421,15 @@ static int sched_rt_global_constraints(void)
        if (sysctl_sched_rt_runtime == 0)
                return -EBUSY;
 
-       spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+       raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = &cpu_rq(i)->rt;
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = global_rt_runtime();
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
-       spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+       raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
 
        return 0;
 }
@@ -10718,9 +10720,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
        /*
         * Take rq->lock to make 64-bit read safe on 32-bit platforms.
         */
-       spin_lock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        data = *cpuusage;
-       spin_unlock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
        data = *cpuusage;
 #endif
@@ -10736,9 +10738,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
        /*
         * Take rq->lock to make 64-bit write safe on 32-bit platforms.
         */
-       spin_lock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        *cpuusage = val;
-       spin_unlock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
        *cpuusage = val;
 #endif
@@ -10972,9 +10974,9 @@ void synchronize_sched_expedited(void)
                init_completion(&req->done);
                req->task = NULL;
                req->dest_cpu = RCU_MIGRATION_NEED_QS;
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                list_add(&req->list, &rq->migration_queue);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                wake_up_process(rq->migration_thread);
        }
        for_each_online_cpu(cpu) {
@@ -10982,11 +10984,11 @@ void synchronize_sched_expedited(void)
                req = &per_cpu(rcu_migration_req, cpu);
                rq = cpu_rq(cpu);
                wait_for_completion(&req->done);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
                        need_full_sync = 1;
                req->dest_cpu = RCU_MIGRATION_IDLE;
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
        rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
        synchronize_sched_expedited_count++;
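
The hunks above apply one mechanical transformation: locks that must remain true spinning locks (the runqueue lock rq->lock and the RT bandwidth/runtime locks) change type from spinlock_t to raw_spinlock_t, and every operation on them gains the raw_ prefix (raw_spin_lock_init, raw_spin_lock_irqsave, raw_spin_trylock, raw_spin_lock_nested, assert_raw_spin_locked, and so on). The snippet below is a minimal illustrative sketch of that pattern, not part of the patch; struct my_rq and its helpers are hypothetical, only the raw_spin_* calls are the real kernel API.

    #include <linux/types.h>
    #include <linux/spinlock.h>

    /* Hypothetical structure mirroring the rq/rt_rq conversions above. */
    struct my_rq {
            raw_spinlock_t lock;            /* was: spinlock_t lock; */
            u64 nr_events;
    };

    static void my_rq_init(struct my_rq *rq)
    {
            raw_spin_lock_init(&rq->lock);  /* was: spin_lock_init() */
            rq->nr_events = 0;
    }

    static void my_rq_event(struct my_rq *rq)
    {
            unsigned long flags;

            /* was: spin_lock_irqsave() / spin_unlock_irqrestore() */
            raw_spin_lock_irqsave(&rq->lock, flags);
            rq->nr_events++;
            raw_spin_unlock_irqrestore(&rq->lock, flags);
    }

The same substitution covers every lock/unlock variant seen in the diff; only the type of the lock and the names of the accessors change, the locking structure itself is untouched.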