X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Fcpu.c;h=181ae7086029e0b0eebb60cbc264b01b49f9297d;hb=3ea96615381157fc7b94549db559adabd7d4233f;hp=628f4ccda12790da9663a4bde4ac897607a8ebd2;hpb=1da177e4c3f41524e886b7f1b8a0c1fc7321cac2;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 628f4cc..181ae70 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -13,35 +13,80 @@
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/stop_machine.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 /* This protects CPUs going up and down... */
-DECLARE_MUTEX(cpucontrol);
+static DEFINE_MUTEX(cpu_add_remove_lock);
+static DEFINE_MUTEX(cpu_bitmask_lock);
 
-static struct notifier_block *cpu_chain;
+static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
+
+/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
+ * Should always be manipulated under cpu_add_remove_lock
+ */
+static int cpu_hotplug_disabled;
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
+static struct task_struct *recursive;
+static int recursive_depth;
+
+void lock_cpu_hotplug(void)
+{
+	struct task_struct *tsk = current;
+
+	if (tsk == recursive) {
+		static int warnings = 10;
+		if (warnings) {
+			printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
+			WARN_ON(1);
+			warnings--;
+		}
+		recursive_depth++;
+		return;
+	}
+	mutex_lock(&cpu_bitmask_lock);
+	recursive = tsk;
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
+
+void unlock_cpu_hotplug(void)
+{
+	WARN_ON(recursive != current);
+	if (recursive_depth) {
+		recursive_depth--;
+		return;
+	}
+	recursive = NULL;
+	mutex_unlock(&cpu_bitmask_lock);
+}
+EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+
+#endif	/* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
-int register_cpu_notifier(struct notifier_block *nb)
+int __cpuinit register_cpu_notifier(struct notifier_block *nb)
 {
 	int ret;
-
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
-		return ret;
-	ret = notifier_chain_register(&cpu_chain, nb);
-	up(&cpucontrol);
+	mutex_lock(&cpu_add_remove_lock);
+	ret = raw_notifier_chain_register(&cpu_chain, nb);
+	mutex_unlock(&cpu_add_remove_lock);
 	return ret;
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	down(&cpucontrol);
-	notifier_chain_unregister(&cpu_chain, nb);
-	up(&cpucontrol);
+	mutex_lock(&cpu_add_remove_lock);
+	raw_notifier_chain_unregister(&cpu_chain, nb);
+	mutex_unlock(&cpu_add_remove_lock);
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static inline void check_for_tasks(int cpu)
 {
 	struct task_struct *p;
@@ -52,58 +97,65 @@ static inline void check_for_tasks(int cpu)
 		    (!cputime_eq(p->utime, cputime_zero) ||
 		     !cputime_eq(p->stime, cputime_zero)))
 			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
-				(state = %ld, flags = %lx) \n",
+				(state = %ld, flags = %x) \n",
 				 p->comm, p->pid, cpu, p->state, p->flags);
 	}
 	write_unlock_irq(&tasklist_lock);
 }
 
+struct take_cpu_down_param {
+	unsigned long mod;
+	void *hcpu;
+};
+
 /* Take this CPU down. */
-static int take_cpu_down(void *unused)
+static int take_cpu_down(void *_param)
 {
+	struct take_cpu_down_param *param = _param;
 	int err;
 
-	/* Take offline: makes arch_cpu_down somewhat easier. */
-	cpu_clear(smp_processor_id(), cpu_online_map);
-
+	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
+				param->hcpu);
 	/* Ensure this CPU doesn't handle any more interrupts. */
 	err = __cpu_disable();
 	if (err < 0)
-		cpu_set(smp_processor_id(), cpu_online_map);
-	else
-		/* Force idle task to run as soon as we yield: it should
-		   immediately notice cpu is offline and die quickly. */
-		sched_idle_next();
+		return err;
 
-	return err;
+	/* Force idle task to run as soon as we yield: it should
+	   immediately notice cpu is offline and die quickly. */
+	sched_idle_next();
+	return 0;
 }
 
-int cpu_down(unsigned int cpu)
+/* Requires cpu_add_remove_lock to be held */
+static int _cpu_down(unsigned int cpu, int tasks_frozen)
 {
-	int err;
+	int err, nr_calls = 0;
 	struct task_struct *p;
 	cpumask_t old_allowed, tmp;
+	void *hcpu = (void *)(long)cpu;
+	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+	struct take_cpu_down_param tcd_param = {
+		.mod = mod,
+		.hcpu = hcpu,
+	};
 
-	if ((err = lock_cpu_hotplug_interruptible()) != 0)
-		return err;
-
-	if (num_online_cpus() == 1) {
-		err = -EBUSY;
-		goto out;
-	}
+	if (num_online_cpus() == 1)
+		return -EBUSY;
 
-	if (!cpu_online(cpu)) {
-		err = -EINVAL;
-		goto out;
-	}
+	if (!cpu_online(cpu))
+		return -EINVAL;
 
-	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
-						(void *)(long)cpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
+	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
+					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
+		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+					  hcpu, nr_calls, NULL);
 		printk("%s: attempt to take down CPU %u failed\n",
 				__FUNCTION__, cpu);
 		err = -EINVAL;
-		goto out;
+		goto out_release;
 	}
 
 	/* Ensure that we are not runnable on dying cpu */
@@ -112,19 +164,22 @@ int cpu_down(unsigned int cpu)
 	cpu_clear(cpu, tmp);
 	set_cpus_allowed(current, tmp);
 
-	p = __stop_machine_run(take_cpu_down, NULL, cpu);
-	if (IS_ERR(p)) {
+	mutex_lock(&cpu_bitmask_lock);
+	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
+	mutex_unlock(&cpu_bitmask_lock);
+
+	if (IS_ERR(p) || cpu_online(cpu)) {
 		/* CPU didn't die: tell everyone. Can't complain. */
-		if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
-				(void *)(long)cpu) == NOTIFY_BAD)
+		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+					    hcpu) == NOTIFY_BAD)
 			BUG();
 
-		err = PTR_ERR(p);
-		goto out_allowed;
-	}
-
-	if (cpu_online(cpu))
+		if (IS_ERR(p)) {
+			err = PTR_ERR(p);
+			goto out_allowed;
+		}
 		goto out_thread;
+	}
 
 	/* Wait for it to sleep (leaving idle task). */
 	while (!idle_cpu(cpu))
@@ -133,13 +188,9 @@ int cpu_down(unsigned int cpu)
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
 
-	/* Move it here so it can run. */
-	kthread_bind(p, get_cpu());
-	put_cpu();
-
 	/* CPU is completely dead: tell everyone. Too late to complain. */
-	if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
-	    == NOTIFY_BAD)
+	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
+				    hcpu) == NOTIFY_BAD)
 		BUG();
 
 	check_for_tasks(cpu);
 
@@ -148,25 +199,39 @@ out_thread:
 	err = kthread_stop(p);
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
-out:
-	unlock_cpu_hotplug();
+out_release:
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
+	return err;
+}
+
+int cpu_down(unsigned int cpu)
+{
+	int err = 0;
+
+	mutex_lock(&cpu_add_remove_lock);
+	if (cpu_hotplug_disabled)
+		err = -EBUSY;
+	else
+		err = _cpu_down(cpu, 0);
+
+	mutex_unlock(&cpu_add_remove_lock);
 	return err;
 }
 #endif /*CONFIG_HOTPLUG_CPU*/
 
-int __devinit cpu_up(unsigned int cpu)
+/* Requires cpu_add_remove_lock to be held */
+static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 {
-	int ret;
+	int ret, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
+	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
-		return ret;
+	if (cpu_online(cpu) || !cpu_present(cpu))
+		return -EINVAL;
 
-	if (cpu_online(cpu) || !cpu_present(cpu)) {
-		ret = -EINVAL;
-		goto out;
-	}
-	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
+	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
+					-1, &nr_calls);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
 				__FUNCTION__, cpu);
@@ -175,19 +240,98 @@ int __devinit cpu_up(unsigned int cpu)
 	}
 
 	/* Arch-specific enabling code. */
+	mutex_lock(&cpu_bitmask_lock);
 	ret = __cpu_up(cpu);
+	mutex_unlock(&cpu_bitmask_lock);
 	if (ret != 0)
 		goto out_notify;
-	if (!cpu_online(cpu))
-		BUG();
+	BUG_ON(!cpu_online(cpu));
 
 	/* Now call notifier in preparation. */
-	notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
 
 out_notify:
 	if (ret != 0)
-		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
-out:
-	up(&cpucontrol);
+		__raw_notifier_call_chain(&cpu_chain,
+				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
+
 	return ret;
 }
+
+int __cpuinit cpu_up(unsigned int cpu)
+{
+	int err = 0;
+
+	mutex_lock(&cpu_add_remove_lock);
+	if (cpu_hotplug_disabled)
+		err = -EBUSY;
+	else
+		err = _cpu_up(cpu, 0);
+
+	mutex_unlock(&cpu_add_remove_lock);
+	return err;
+}
+
+#ifdef CONFIG_SUSPEND_SMP
+static cpumask_t frozen_cpus;
+
+int disable_nonboot_cpus(void)
+{
+	int cpu, first_cpu, error = 0;
+
+	mutex_lock(&cpu_add_remove_lock);
+	first_cpu = first_cpu(cpu_online_map);
+	/* We take down all of the non-boot CPUs in one shot to avoid races
+	 * with the userspace trying to use the CPU hotplug at the same time
+	 */
+	cpus_clear(frozen_cpus);
+	printk("Disabling non-boot CPUs ...\n");
+	for_each_online_cpu(cpu) {
+		if (cpu == first_cpu)
+			continue;
+		error = _cpu_down(cpu, 1);
+		if (!error) {
+			cpu_set(cpu, frozen_cpus);
+			printk("CPU%d is down\n", cpu);
+		} else {
+			printk(KERN_ERR "Error taking CPU%d down: %d\n",
+				cpu, error);
+			break;
+		}
+	}
+	if (!error) {
+		BUG_ON(num_online_cpus() > 1);
+		/* Make sure the CPUs won't be enabled by someone else */
+		cpu_hotplug_disabled = 1;
+	} else {
+		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
+	}
+	mutex_unlock(&cpu_add_remove_lock);
+	return error;
+}
+
+void enable_nonboot_cpus(void)
+{
+	int cpu, error;
+
+	/* Allow everyone to use the CPU hotplug again */
+	mutex_lock(&cpu_add_remove_lock);
+	cpu_hotplug_disabled = 0;
+	if (cpus_empty(frozen_cpus))
+		goto out;
+
+	printk("Enabling non-boot CPUs ...\n");
+	for_each_cpu_mask(cpu, frozen_cpus) {
+		error = _cpu_up(cpu, 1);
+		if (!error) {
+			printk("CPU%d is up\n", cpu);
+			continue;
+		}
+		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
+	}
+	cpus_clear(frozen_cpus);
+out:
+	mutex_unlock(&cpu_add_remove_lock);
+}
+#endif
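
Usage note (not part of the patch): after this change, notifiers registered with register_cpu_notifier() see the same CPU_* events during suspend/resume as during runtime hotplug, except that the tasks_frozen paths in _cpu_down()/_cpu_up() OR the CPU_TASKS_FROZEN bit into the action code. Below is a minimal sketch of a client callback written against that convention; the example_* hooks are hypothetical placeholders for a subsystem's own per-CPU setup and teardown, not symbols from this file, and the module form requires CONFIG_HOTPLUG_CPU (the EXPORT_SYMBOLs above live under that #ifdef).

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

/* Hypothetical per-CPU setup/teardown hooks, for illustration only. */
static void example_prepare_cpu(unsigned int cpu) { }
static void example_cleanup_cpu(unsigned int cpu) { }

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* Mask off CPU_TASKS_FROZEN so suspend/resume transitions
	 * (tasks_frozen == 1) are handled like runtime hotplug. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		example_prepare_cpu(cpu);	/* before the CPU comes online */
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		example_cleanup_cpu(cpu);	/* bring-up failed, or CPU is gone */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	return register_cpu_notifier(&example_cpu_notifier);
}
module_init(example_init);

static void __exit example_exit(void)
{
	unregister_cpu_notifier(&example_cpu_notifier);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");

Stripping CPU_TASKS_FROZEN, as above, is the common pattern when a subsystem does not care whether a transition came from suspend or from userspace hotplug; a callback that must treat the two differently can instead match the combined values such as CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN).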