X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Fcpu.c;h=6ba0f1ecb21230de60f6ab9fd7b2f58907b48d55;hb=fbfecd3712f917ca210a55c157233d88b785896b;hp=03dcd981846a6b8020a07c058312c7277621be49;hpb=81615b624a45621b758380ec45d750483eae281d;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 03dcd98..6ba0f1e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,73 +15,136 @@
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 
-/* This protects CPUs going up and down... */
-static DEFINE_MUTEX(cpucontrol);
+#ifdef CONFIG_SMP
+/* Serializes the updates to cpu_online_mask, cpu_present_mask */
+static DEFINE_MUTEX(cpu_add_remove_lock);
 
-static BLOCKING_NOTIFIER_HEAD(cpu_chain);
+static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
+
+/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
+ * Should always be manipulated under cpu_add_remove_lock
+ */
+static int cpu_hotplug_disabled;
+
+static struct {
+	struct task_struct *active_writer;
+	struct mutex lock; /* Synchronizes accesses to refcount, */
+	/*
+	 * Also blocks the new readers during
+	 * an ongoing cpu hotplug operation.
+	 */
+	int refcount;
+} cpu_hotplug = {
+	.active_writer = NULL,
+	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+	.refcount = 0,
+};
 
 #ifdef CONFIG_HOTPLUG_CPU
-static struct task_struct *lock_cpu_hotplug_owner;
-static int lock_cpu_hotplug_depth;
-static int __lock_cpu_hotplug(int interruptible)
+void get_online_cpus(void)
 {
-	int ret = 0;
+	might_sleep();
+	if (cpu_hotplug.active_writer == current)
+		return;
+	mutex_lock(&cpu_hotplug.lock);
+	cpu_hotplug.refcount++;
+	mutex_unlock(&cpu_hotplug.lock);
 
-	if (lock_cpu_hotplug_owner != current) {
-		if (interruptible)
-			ret = mutex_lock_interruptible(&cpucontrol);
-		else
-			mutex_lock(&cpucontrol);
-	}
+}
+EXPORT_SYMBOL_GPL(get_online_cpus);
 
-	/*
-	 * Set only if we succeed in locking
-	 */
-	if (!ret) {
-		lock_cpu_hotplug_depth++;
-		lock_cpu_hotplug_owner = current;
-	}
+void put_online_cpus(void)
+{
+	if (cpu_hotplug.active_writer == current)
+		return;
+	mutex_lock(&cpu_hotplug.lock);
+	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
+		wake_up_process(cpu_hotplug.active_writer);
+	mutex_unlock(&cpu_hotplug.lock);
 
-	return ret;
 }
+EXPORT_SYMBOL_GPL(put_online_cpus);
 
-void lock_cpu_hotplug(void)
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * The following two APIs must be used when attempting
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
+ */
+void cpu_maps_update_begin(void)
 {
-	__lock_cpu_hotplug(0);
+	mutex_lock(&cpu_add_remove_lock);
 }
-EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
 
-void unlock_cpu_hotplug(void)
+void cpu_maps_update_done(void)
 {
-	if (--lock_cpu_hotplug_depth == 0) {
-		lock_cpu_hotplug_owner = NULL;
-		mutex_unlock(&cpucontrol);
-	}
+	mutex_unlock(&cpu_add_remove_lock);
 }
-EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
 
-int lock_cpu_hotplug_interruptible(void)
+/*
+ * This ensures that the hotplug operation can begin only when the
+ * refcount goes to zero.
+ *
+ * Note that during a cpu-hotplug operation, the new readers, if any,
+ * will be blocked by the cpu_hotplug.lock.
+ *
+ * Since cpu_hotplug_begin() is always called after invoking
+ * cpu_maps_update_begin(), we can be sure that only one writer is active.
+ *
+ * Note that theoretically, there is a possibility of a livelock:
+ * - Refcount goes to zero, last reader wakes up the sleeping
+ *   writer.
+ * - Last reader unlocks the cpu_hotplug.lock.
+ * - A new reader arrives at this moment, bumps up the refcount.
+ * - The writer acquires the cpu_hotplug.lock, finds the refcount
+ *   non-zero and goes to sleep again.
+ *
+ * However, this is very difficult to achieve in practice since
+ * get_online_cpus() is not an API that is called all that often.
+ *
+ */
+static void cpu_hotplug_begin(void)
 {
-	return __lock_cpu_hotplug(1);
+	cpu_hotplug.active_writer = current;
+
+	for (;;) {
+		mutex_lock(&cpu_hotplug.lock);
+		if (likely(!cpu_hotplug.refcount))
+			break;
+		__set_current_state(TASK_UNINTERRUPTIBLE);
+		mutex_unlock(&cpu_hotplug.lock);
+		schedule();
+	}
 }
-EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
-#endif /* CONFIG_HOTPLUG_CPU */
 
+static void cpu_hotplug_done(void)
+{
+	cpu_hotplug.active_writer = NULL;
+	mutex_unlock(&cpu_hotplug.lock);
+}
 
 /* Need to know about CPUs going up/down? */
-int register_cpu_notifier(struct notifier_block *nb)
+int __ref register_cpu_notifier(struct notifier_block *nb)
 {
-	return blocking_notifier_chain_register(&cpu_chain, nb);
+	int ret;
+	cpu_maps_update_begin();
+	ret = raw_notifier_chain_register(&cpu_chain, nb);
+	cpu_maps_update_done();
+	return ret;
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+
 EXPORT_SYMBOL(register_cpu_notifier);
 
-void unregister_cpu_notifier(struct notifier_block *nb)
+void __ref unregister_cpu_notifier(struct notifier_block *nb)
 {
-	blocking_notifier_chain_unregister(&cpu_chain, nb);
+	cpu_maps_update_begin();
+	raw_notifier_chain_unregister(&cpu_chain, nb);
+	cpu_maps_update_done();
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static inline void check_for_tasks(int cpu)
 {
 	struct task_struct *p;
@@ -92,15 +155,22 @@ static inline void check_for_tasks(int cpu)
 		    (!cputime_eq(p->utime, cputime_zero) ||
 		     !cputime_eq(p->stime, cputime_zero)))
 			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
-			(state = %ld, flags = %lx) \n",
-			 p->comm, p->pid, cpu, p->state, p->flags);
+			(state = %ld, flags = %x) \n",
+			 p->comm, task_pid_nr(p), cpu,
+			 p->state, p->flags);
 	}
 	write_unlock_irq(&tasklist_lock);
 }
 
+struct take_cpu_down_param {
+	unsigned long mod;
+	void *hcpu;
+};
+
 /* Take this CPU down. */
-static int take_cpu_down(void *unused)
+static int __ref take_cpu_down(void *_param)
 {
+	struct take_cpu_down_param *param = _param;
 	int err;
 
 	/* Ensure this CPU doesn't handle any more interrupts. */
@@ -108,59 +178,64 @@ static int take_cpu_down(void *unused)
 	if (err < 0)
 		return err;
 
+	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
+				param->hcpu);
+
 	/* Force idle task to run as soon as we yield: it should
 	   immediately notice cpu is offline and die quickly. */
 	sched_idle_next();
 	return 0;
 }
 
-int cpu_down(unsigned int cpu)
+/* Requires cpu_add_remove_lock to be held */
+static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
-	int err;
-	struct task_struct *p;
-	cpumask_t old_allowed, tmp;
+	int err, nr_calls = 0;
+	cpumask_var_t old_allowed;
+	void *hcpu = (void *)(long)cpu;
+	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+	struct take_cpu_down_param tcd_param = {
+		.mod = mod,
+		.hcpu = hcpu,
+	};
 
-	if ((err = lock_cpu_hotplug_interruptible()) != 0)
-		return err;
+	if (num_online_cpus() == 1)
+		return -EBUSY;
 
-	if (num_online_cpus() == 1) {
-		err = -EBUSY;
-		goto out;
-	}
+	if (!cpu_online(cpu))
+		return -EINVAL;
 
-	if (!cpu_online(cpu)) {
-		err = -EINVAL;
-		goto out;
-	}
+	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+		return -ENOMEM;
 
-	err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
-						(void *)(long)cpu);
+	cpu_hotplug_begin();
+	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
+					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
+		nr_calls--;
+		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+					  hcpu, nr_calls, NULL);
 		printk("%s: attempt to take down CPU %u failed\n",
-				__FUNCTION__, cpu);
+				__func__, cpu);
 		err = -EINVAL;
-		goto out;
+		goto out_release;
 	}
 
 	/* Ensure that we are not runnable on dying cpu */
-	old_allowed = current->cpus_allowed;
-	tmp = CPU_MASK_ALL;
-	cpu_clear(cpu, tmp);
-	set_cpus_allowed(current, tmp);
+	cpumask_copy(old_allowed, &current->cpus_allowed);
+	set_cpus_allowed_ptr(current,
+			     cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
 
-	p = __stop_machine_run(take_cpu_down, NULL, cpu);
-	if (IS_ERR(p)) {
+	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
-		if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
-				(void *)(long)cpu) == NOTIFY_BAD)
+		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
+					    hcpu) == NOTIFY_BAD)
 			BUG();
 
-		err = PTR_ERR(p);
 		goto out_allowed;
 	}
-
-	if (cpu_online(cpu))
-		goto out_thread;
+	BUG_ON(cpu_online(cpu));
 
 	/* Wait for it to sleep (leaving idle task). */
 	while (!idle_cpu(cpu))
@@ -169,44 +244,82 @@ int cpu_down(unsigned int cpu)
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
 
-	/* Move it here so it can run. */
-	kthread_bind(p, get_cpu());
-	put_cpu();
-
 	/* CPU is completely dead: tell everyone.  Too late to complain. */
-	if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD,
-			(void *)(long)cpu) == NOTIFY_BAD)
+	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
+				    hcpu) == NOTIFY_BAD)
 		BUG();
 
 	check_for_tasks(cpu);
 
-out_thread:
-	err = kthread_stop(p);
 out_allowed:
-	set_cpus_allowed(current, old_allowed);
-out:
-	unlock_cpu_hotplug();
+	set_cpus_allowed_ptr(current, old_allowed);
+out_release:
+	cpu_hotplug_done();
+	if (!err) {
+		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
+					    hcpu) == NOTIFY_BAD)
+			BUG();
+	}
+	free_cpumask_var(old_allowed);
 	return err;
 }
-#endif /*CONFIG_HOTPLUG_CPU*/
 
-int __devinit cpu_up(unsigned int cpu)
+int __ref cpu_down(unsigned int cpu)
 {
-	int ret;
-	void *hcpu = (void *)(long)cpu;
+	int err;
 
-	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
-		return ret;
+	err = stop_machine_create();
+	if (err)
+		return err;
+	cpu_maps_update_begin();
 
-	if (cpu_online(cpu) || !cpu_present(cpu)) {
-		ret = -EINVAL;
+	if (cpu_hotplug_disabled) {
+		err = -EBUSY;
 		goto out;
 	}
 
-	ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+	set_cpu_active(cpu, false);
+
+	/*
+	 * Make sure all cpus did the reschedule and are not
+	 * using a stale version of the cpu_active_mask.
+	 * This is not strictly necessary because the stop_machine()
+	 * we run down the line already provides the required
+	 * synchronization. But it's really a side effect and we do not
+	 * want to depend on the innards of stop_machine() here.
+	 */
+	synchronize_sched();
+
+	err = _cpu_down(cpu, 0);
+
+	if (cpu_online(cpu))
+		set_cpu_active(cpu, true);
+
+out:
+	cpu_maps_update_done();
+	stop_machine_destroy();
+	return err;
+}
+EXPORT_SYMBOL(cpu_down);
+#endif /*CONFIG_HOTPLUG_CPU*/
+
+/* Requires cpu_add_remove_lock to be held */
+static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
+{
+	int ret, nr_calls = 0;
+	void *hcpu = (void *)(long)cpu;
+	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+
+	if (cpu_online(cpu) || !cpu_present(cpu))
+		return -EINVAL;
+
+	cpu_hotplug_begin();
+	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
+					-1, &nr_calls);
 	if (ret == NOTIFY_BAD) {
+		nr_calls--;
 		printk("%s: attempt to bring up CPU %u failed\n",
-				__FUNCTION__, cpu);
+				__func__, cpu);
 		ret = -EINVAL;
 		goto out_notify;
 	}
@@ -217,14 +330,250 @@ int __devinit cpu_up(unsigned int cpu)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
 
+	set_cpu_active(cpu, true);
+
 	/* Now call notifier in preparation. */
-	blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
 
 out_notify:
 	if (ret != 0)
-		blocking_notifier_call_chain(&cpu_chain,
-				CPU_UP_CANCELED, hcpu);
-out:
-	unlock_cpu_hotplug();
+		__raw_notifier_call_chain(&cpu_chain,
+				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+	cpu_hotplug_done();
+
 	return ret;
 }
+
+int __cpuinit cpu_up(unsigned int cpu)
+{
+	int err = 0;
+	if (!cpu_possible(cpu)) {
+		printk(KERN_ERR "can't online cpu %d because it is not "
+			"configured as may-hotadd at boot time\n", cpu);
+#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
+		printk(KERN_ERR "please check additional_cpus= boot "
+				"parameter\n");
+#endif
+		return -EINVAL;
+	}
+
+	cpu_maps_update_begin();
+
+	if (cpu_hotplug_disabled) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	err = _cpu_up(cpu, 0);
+
+out:
+	cpu_maps_update_done();
+	return err;
+}
+
+#ifdef CONFIG_PM_SLEEP_SMP
+static cpumask_var_t frozen_cpus;
+
+int disable_nonboot_cpus(void)
+{
+	int cpu, first_cpu, error;
+
+	error = stop_machine_create();
+	if (error)
+		return error;
+	cpu_maps_update_begin();
+	first_cpu = cpumask_first(cpu_online_mask);
+	/* We take down all of the non-boot CPUs in one shot to avoid races
+	 * with the userspace trying to use the CPU hotplug at the same time
+	 */
+	cpumask_clear(frozen_cpus);
+	printk("Disabling non-boot CPUs ...\n");
+	for_each_online_cpu(cpu) {
+		if (cpu == first_cpu)
+			continue;
+		error = _cpu_down(cpu, 1);
+		if (!error) {
+			cpumask_set_cpu(cpu, frozen_cpus);
+			printk("CPU%d is down\n", cpu);
+		} else {
+			printk(KERN_ERR "Error taking CPU%d down: %d\n",
+				cpu, error);
+			break;
+		}
+	}
+
+	if (!error) {
+		BUG_ON(num_online_cpus() > 1);
+		/* Make sure the CPUs won't be enabled by someone else */
+		cpu_hotplug_disabled = 1;
+	} else {
+		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
+	}
+	cpu_maps_update_done();
+	stop_machine_destroy();
+	return error;
+}
+
+void __weak arch_enable_nonboot_cpus_begin(void)
+{
+}
+
+void __weak arch_enable_nonboot_cpus_end(void)
+{
+}
+
+void __ref enable_nonboot_cpus(void)
+{
+	int cpu, error;
+
+	/* Allow everyone to use the CPU hotplug again */
+	cpu_maps_update_begin();
+	cpu_hotplug_disabled = 0;
+	if (cpumask_empty(frozen_cpus))
+		goto out;
+
+	printk("Enabling non-boot CPUs ...\n");
+
+	arch_enable_nonboot_cpus_begin();
+
+	for_each_cpu(cpu, frozen_cpus) {
+		error = _cpu_up(cpu, 1);
+		if (!error) {
+			printk("CPU%d is up\n", cpu);
+			continue;
+		}
+		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
+	}
+
+	arch_enable_nonboot_cpus_end();
+
+	cpumask_clear(frozen_cpus);
+out:
+	cpu_maps_update_done();
+}
+
+static int alloc_frozen_cpus(void)
+{
+	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
+		return -ENOMEM;
+	return 0;
+}
+core_initcall(alloc_frozen_cpus);
+#endif /* CONFIG_PM_SLEEP_SMP */
+
+/**
+ * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
+ * @cpu: cpu that just started
+ *
+ * This function calls the cpu_chain notifiers with CPU_STARTING.
+ * It must be called by the arch code on the new cpu, before the new cpu
+ * enables interrupts and before the "boot" cpu returns from __cpu_up().
+ */
+void __cpuinit notify_cpu_starting(unsigned int cpu)
+{
+	unsigned long val = CPU_STARTING;
+
+#ifdef CONFIG_PM_SLEEP_SMP
+	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
+		val = CPU_STARTING_FROZEN;
+#endif /* CONFIG_PM_SLEEP_SMP */
+	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * cpu_bit_bitmap[] is a special, "compressed" data structure that
+ * represents all NR_CPUS bits binary values of 1<<nr.
+ *
+ * It is used by cpumask_of() to get a constant address to a CPU
+ * mask value that has a single bit set only.
+ */
+
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
+#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
+
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+
+	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
+	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
+	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
+
+const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
+EXPORT_SYMBOL(cpu_all_bits);
+
+#ifdef CONFIG_INIT_ALL_POSSIBLE
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
+	= CPU_BITS_ALL;
+#else
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
+#endif
+const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
+EXPORT_SYMBOL(cpu_possible_mask);
+
+static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
+EXPORT_SYMBOL(cpu_online_mask);
+
+static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
+EXPORT_SYMBOL(cpu_present_mask);
+
+static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
+EXPORT_SYMBOL(cpu_active_mask);
+
+void set_cpu_possible(unsigned int cpu, bool possible)
+{
+	if (possible)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
+}
+
+void set_cpu_present(unsigned int cpu, bool present)
+{
+	if (present)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
+}
+
+void set_cpu_online(unsigned int cpu, bool online)
+{
+	if (online)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
+}
+
+void set_cpu_active(unsigned int cpu, bool active)
+{
+	if (active)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
+}
+
+void init_cpu_present(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_present_bits), src);
+}
+
+void init_cpu_possible(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_possible_bits), src);
+}
+
+void init_cpu_online(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_online_bits), src);
+}
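
The reader-side pair introduced by this patch, get_online_cpus()/put_online_cpus(), replaces lock_cpu_hotplug()/unlock_cpu_hotplug(). The sketch below is not part of the patch (the helper name is invented for illustration); it only shows the intended calling pattern: take the reference before walking cpu_online_mask, drop it afterwards so a sleeping cpu_hotplug_begin() writer can make progress.

/* Illustrative sketch only -- example_count_online() is a hypothetical helper. */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

static void example_count_online(void)
{
	unsigned int cpu, n = 0;

	get_online_cpus();		/* may sleep; bumps cpu_hotplug.refcount */
	for_each_online_cpu(cpu)	/* cpu_online_mask is stable in here */
		n++;
	put_online_cpus();		/* wakes a waiting hotplug writer if we were the last reader */

	pr_info("saw %u online CPUs\n", n);
}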
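
register_cpu_notifier() now runs callbacks from a raw notifier chain under cpu_add_remove_lock, and the suspend/resume paths OR CPU_TASKS_FROZEN into the event (the `mod` value in _cpu_down()/_cpu_up()). A hypothetical callback, sketched below and not taken from this patch, would therefore mask that bit off before switching on the base event.

/* Illustrative sketch only -- example_cpu_callback/example_cpu_nb are hypothetical. */
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kernel.h>

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {	/* treat frozen and normal events alike */
	case CPU_UP_PREPARE:
		/* allocate per-cpu state; returning NOTIFY_BAD aborts the bring-up */
		break;
	case CPU_ONLINE:
		pr_info("cpu %u came online\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
		/* quiesce work aimed at this cpu; the offline may still fail */
		break;
	case CPU_DEAD:
		/* free per-cpu state; the cpu is gone at this point */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

static int __init example_cpu_nb_init(void)
{
	/* would normally be wired up via an initcall or module_init() */
	return register_cpu_notifier(&example_cpu_nb);
}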
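
disable_nonboot_cpus() and enable_nonboot_cpus() are the suspend-time consumers of _cpu_down(cpu, 1)/_cpu_up(cpu, 1). The following is a simplified sketch of how the PM core drives them, not the actual kernel/power code.

/* Simplified sketch of the suspend-side usage; not the real PM core code. */
#include <linux/cpu.h>

static int example_suspend_enter(void)
{
	int error;

	error = disable_nonboot_cpus();	/* offlines everything but the boot CPU,
					   recording the victims in frozen_cpus */
	if (error)
		return error;

	/* ... platform enters the sleep state on the remaining CPU ... */

	enable_nonboot_cpus();		/* brings the frozen CPUs back, with tasks_frozen = 1 */
	return 0;
}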
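
The cpu_bit_bitmap[] table at the end of the file only makes sense together with its consumer in <linux/cpumask.h>. The sketch below shows roughly how cpumask_of()/get_cpu_mask() indexed it at the time (quoted from memory, so treat it as an approximation): row 1 + cpu % BITS_PER_LONG has exactly that bit set in its first word, and stepping the pointer back by cpu / BITS_PER_LONG words makes the bit land in the right word of the returned mask, which is why row 0 must stay empty ("so we can back into it").

/* Approximate consumer of cpu_bit_bitmap[], as found in <linux/cpumask.h>. */
static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
#define cpumask_of(cpu) (get_cpu_mask(cpu))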