#include <linux/stop_machine.h>
#include <linux/mutex.h>
-/*
- * Represents all cpu's present in the system
- * In systems capable of hotplug, this map could dynamically grow
- * as new cpu's are detected in the system via any platform specific
- * method, such as ACPI for e.g.
- */
-cpumask_t cpu_present_map __read_mostly;
-EXPORT_SYMBOL(cpu_present_map);
-
-#ifndef CONFIG_SMP
-
-/*
- * Represents all cpu's that are currently online.
- */
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_online_map);
-
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_possible_map);
-
-#else /* CONFIG_SMP */
-
-/* Serializes the updates to cpu_online_map, cpu_present_map */
+#ifdef CONFIG_SMP
+/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
* an ongoing cpu hotplug operation.
*/
int refcount;
-} cpu_hotplug;
-
-void __init cpu_hotplug_init(void)
-{
- cpu_hotplug.active_writer = NULL;
- mutex_init(&cpu_hotplug.lock);
- cpu_hotplug.refcount = 0;
-}
-
-cpumask_t cpu_active_map;
+} cpu_hotplug = {
+ .active_writer = NULL,
+ .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+ .refcount = 0,
+};
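+
+/*
+ * Sketch of the reader side (the existing get_online_cpus() /
+ * put_online_cpus() API, shown for context, not added by this patch):
+ * readers pin the CPU configuration by bumping @refcount under @lock,
+ * while the hotplug writer records itself in @active_writer and waits
+ * for the readers to drain:
+ *
+ *   get_online_cpus();
+ *   for_each_online_cpu(cpu)
+ *       do_something(cpu);   (placeholder callback)
+ *   put_online_cpus();
+ */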
#ifdef CONFIG_HOTPLUG_CPU
/*
* The following two APIs must be used when attempting
- * to serialize the updates to cpu_online_map, cpu_present_map.
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
*/
void cpu_maps_update_begin(void)
{
struct take_cpu_down_param *param = _param;
int err;
- raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
- param->hcpu);
/* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable();
if (err < 0)
return err;
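+
+ /* Interrupts are now off on this CPU, so the CPU_DYING notifiers
+ * below run against a quiesced CPU, and are skipped entirely when
+ * __cpu_disable() fails. */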
+ raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
+ param->hcpu);
+
/* Force idle task to run as soon as we yield: it should
immediately notice cpu is offline and die quickly. */
sched_idle_next();
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
int err, nr_calls = 0;
- struct task_struct *p;
- cpumask_t old_allowed, tmp;
+ cpumask_var_t old_allowed;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
if (!cpu_online(cpu))
return -EINVAL;
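+ /* With CONFIG_CPUMASK_OFFSTACK, a cpumask_var_t is allocated from
+ * the heap instead of living on the stack, so the allocation below
+ * can fail and must be checked. */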
+ if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+ return -ENOMEM;
+
cpu_hotplug_begin();
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
hcpu, -1, &nr_calls);
}
/* Ensure that we are not runnable on dying cpu */
- old_allowed = current->cpus_allowed;
- cpus_setall(tmp);
- cpu_clear(cpu, tmp);
- set_cpus_allowed_ptr(current, &tmp);
+ cpumask_copy(old_allowed, &current->cpus_allowed);
+ set_cpus_allowed_ptr(current,
+ cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
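+ /* cpumask_any_but() picks an arbitrary online CPU other than @cpu,
+ * and cpumask_of() supplies a constant mask for it, so no on-stack
+ * temporary cpumask is needed here any more. */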
- p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
-
- if (IS_ERR(p) || cpu_online(cpu)) {
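+ /* __stop_machine() runs take_cpu_down() on @cpu with all CPUs
+ * stopped and returns its error code directly; unlike the old
+ * __stop_machine_run() there is no helper thread left to reap
+ * with kthread_stop(). */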
+ err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+ if (err) {
/* CPU didn't die: tell everyone. Can't complain. */
if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
hcpu) == NOTIFY_BAD)
BUG();
- if (IS_ERR(p)) {
- err = PTR_ERR(p);
- goto out_allowed;
- }
- goto out_thread;
+ goto out_allowed;
}
+ BUG_ON(cpu_online(cpu));
/* Wait for it to sleep (leaving idle task). */
while (!idle_cpu(cpu))
check_for_tasks(cpu);
-out_thread:
- err = kthread_stop(p);
out_allowed:
- set_cpus_allowed_ptr(current, &old_allowed);
+ set_cpus_allowed_ptr(current, old_allowed);
out_release:
cpu_hotplug_done();
if (!err) {
hcpu) == NOTIFY_BAD)
BUG();
}
+ free_cpumask_var(old_allowed);
return err;
}
int __ref cpu_down(unsigned int cpu)
{
- int err = 0;
+ int err;
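+
+ /* Pre-create the stop_machine resources so that the __stop_machine()
+ * call inside _cpu_down() cannot fail on memory allocation; paired
+ * with stop_machine_destroy() below. */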
+ err = stop_machine_create();
+ if (err)
+ return err;
cpu_maps_update_begin();
if (cpu_hotplug_disabled) {
goto out;
}
- cpu_clear(cpu, cpu_active_map);
+ set_cpu_active(cpu, false);
/*
* Make sure all cpus did the reschedule and are not
- * using stale version of the cpu_active_map.
+ * using stale version of the cpu_active_mask.
* This is not strictly necessary because stop_machine()
* that we run down the line already provides the required
* synchronization. But it's really a side effect and we do not
err = _cpu_down(cpu, 0);
if (cpu_online(cpu))
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(cpu, true);
out:
cpu_maps_update_done();
+ stop_machine_destroy();
return err;
}
EXPORT_SYMBOL(cpu_down);
goto out_notify;
BUG_ON(!cpu_online(cpu));
+ set_cpu_active(cpu, true);
+
/* Now call notifier in preparation. */
raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
int __cpuinit cpu_up(unsigned int cpu)
{
int err = 0;
- if (!cpu_isset(cpu, cpu_possible_map)) {
+ if (!cpu_possible(cpu)) {
printk(KERN_ERR "can't online cpu %d because it is not "
"configured as may-hotadd at boot time\n", cpu);
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
+#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
printk(KERN_ERR "please check additional_cpus= boot "
"parameter\n");
#endif
err = _cpu_up(cpu, 0);
- if (cpu_online(cpu))
- cpu_set(cpu, cpu_active_map);
-
out:
cpu_maps_update_done();
return err;
}
#ifdef CONFIG_PM_SLEEP_SMP
-static cpumask_t frozen_cpus;
+static cpumask_var_t frozen_cpus;
int disable_nonboot_cpus(void)
{
- int cpu, first_cpu, error = 0;
+ int cpu, first_cpu, error;
+ error = stop_machine_create();
+ if (error)
+ return error;
cpu_maps_update_begin();
- first_cpu = first_cpu(cpu_online_map);
+ first_cpu = cpumask_first(cpu_online_mask);
/* We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
*/
- cpus_clear(frozen_cpus);
+ cpumask_clear(frozen_cpus);
printk("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
if (cpu == first_cpu)
continue;
error = _cpu_down(cpu, 1);
if (!error) {
- cpu_set(cpu, frozen_cpus);
+ cpumask_set_cpu(cpu, frozen_cpus);
printk("CPU%d is down\n", cpu);
} else {
printk(KERN_ERR "Error taking CPU%d down: %d\n",
break;
}
}
+
if (!error) {
BUG_ON(num_online_cpus() > 1);
/* Make sure the CPUs won't be enabled by someone else */
printk(KERN_ERR "Non-boot CPUs are not disabled\n");
}
cpu_maps_update_done();
+ stop_machine_destroy();
return error;
}
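+
+/* Weak no-op defaults: an architecture may override these two hooks
+ * to bracket the re-online loop in enable_nonboot_cpus(), e.g. for a
+ * resume-time rendezvous (assumed use; not defined by this file). */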
+void __weak arch_enable_nonboot_cpus_begin(void)
+{
+}
+
+void __weak arch_enable_nonboot_cpus_end(void)
+{
+}
+
void __ref enable_nonboot_cpus(void)
{
int cpu, error;
/* Allow everyone to use the CPU hotplug again */
cpu_maps_update_begin();
cpu_hotplug_disabled = 0;
- if (cpus_empty(frozen_cpus))
+ if (cpumask_empty(frozen_cpus))
goto out;
printk("Enabling non-boot CPUs ...\n");
- for_each_cpu_mask_nr(cpu, frozen_cpus) {
+
+ arch_enable_nonboot_cpus_begin();
+
+ for_each_cpu(cpu, frozen_cpus) {
error = _cpu_up(cpu, 1);
if (!error) {
printk("CPU%d is up\n", cpu);
}
printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
}
- cpus_clear(frozen_cpus);
+
+ arch_enable_nonboot_cpus_end();
+
+ cpumask_clear(frozen_cpus);
out:
cpu_maps_update_done();
}
+
+static int alloc_frozen_cpus(void)
+{
+ if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
+ return -ENOMEM;
+ return 0;
+}
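+/* Runs at core_initcall time so frozen_cpus is allocated well before
+ * any suspend path can reach disable_nonboot_cpus(). */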
+core_initcall(alloc_frozen_cpus);
#endif /* CONFIG_PM_SLEEP_SMP */
+/**
+ * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
+ * @cpu: cpu that just started
+ *
+ * This function calls the cpu_chain notifiers with CPU_STARTING.
+ * It must be called by the arch code on the new cpu, before the new cpu
+ * enables interrupts and before the "boot" cpu returns from __cpu_up().
+ */
+void __cpuinit notify_cpu_starting(unsigned int cpu)
+{
+ unsigned long val = CPU_STARTING;
+
+#ifdef CONFIG_PM_SLEEP_SMP
+ if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
+ val = CPU_STARTING_FROZEN;
+#endif /* CONFIG_PM_SLEEP_SMP */
+ raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
+}
+
#endif /* CONFIG_SMP */
-#ifndef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-/* 64 bits of zeros, for initializers. */
-#if BITS_PER_LONG == 32
-#define Z64 0, 0
-#else
-#define Z64 0
-#endif
+/*
+ * cpu_bit_bitmap[] is a special, "compressed" data structure that
+ * represents the NR_CPUS-bit value 1<<nr for each nr < NR_CPUS.
+ *
+ * It is used by cpumask_of() to get a constant address to a CPU
+ * mask value that has a single bit set only.
+ */
-/* Initializer macros. */
-#define CMI0(n) { .bits = { 1UL << (n) } }
-#define CMI(n, ...) { .bits = { __VA_ARGS__, 1UL << ((n) % BITS_PER_LONG) } }
-
-#define CMI8(n, ...) \
- CMI((n), __VA_ARGS__), CMI((n)+1, __VA_ARGS__), \
- CMI((n)+2, __VA_ARGS__), CMI((n)+3, __VA_ARGS__), \
- CMI((n)+4, __VA_ARGS__), CMI((n)+5, __VA_ARGS__), \
- CMI((n)+6, __VA_ARGS__), CMI((n)+7, __VA_ARGS__)
-
-#if BITS_PER_LONG == 32
-#define CMI64(n, ...) \
- CMI8((n), __VA_ARGS__), CMI8((n)+8, __VA_ARGS__), \
- CMI8((n)+16, __VA_ARGS__), CMI8((n)+24, __VA_ARGS__), \
- CMI8((n)+32, 0, __VA_ARGS__), CMI8((n)+40, 0, __VA_ARGS__), \
- CMI8((n)+48, 0, __VA_ARGS__), CMI8((n)+56, 0, __VA_ARGS__)
-#else
-#define CMI64(n, ...) \
- CMI8((n), __VA_ARGS__), CMI8((n)+8, __VA_ARGS__), \
- CMI8((n)+16, __VA_ARGS__), CMI8((n)+24, __VA_ARGS__), \
- CMI8((n)+32, __VA_ARGS__), CMI8((n)+40, __VA_ARGS__), \
- CMI8((n)+48, __VA_ARGS__), CMI8((n)+56, __VA_ARGS__)
-#endif
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x)
+#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
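+
+/* For example, MASK_DECLARE_2(0) expands to:
+ * [1][0] = 1UL << 0, [2][0] = 1UL << 1
+ * so row n+1 holds the single word with bit n set, and row 0 stays
+ * all zeroes. */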
-#define CMI256(n, ...) \
- CMI64((n), __VA_ARGS__), CMI64((n)+64, Z64, __VA_ARGS__), \
- CMI64((n)+128, Z64, Z64, __VA_ARGS__), \
- CMI64((n)+192, Z64, Z64, Z64, __VA_ARGS__)
-#define Z256 Z64, Z64, Z64, Z64
-
-#define CMI1024(n, ...) \
- CMI256((n), __VA_ARGS__), \
- CMI256((n)+256, Z256, __VA_ARGS__), \
- CMI256((n)+512, Z256, Z256, __VA_ARGS__), \
- CMI256((n)+768, Z256, Z256, Z256, __VA_ARGS__)
-#define Z1024 Z256, Z256, Z256, Z256
-
-/* We want this statically initialized, just to be safe. We try not
- * to waste too much space, either. */
-static const cpumask_t cpumask_map[] = {
- CMI0(0), CMI0(1), CMI0(2), CMI0(3),
-#if NR_CPUS > 4
- CMI0(4), CMI0(5), CMI0(6), CMI0(7),
-#endif
-#if NR_CPUS > 8
- CMI0(8), CMI0(9), CMI0(10), CMI0(11),
- CMI0(12), CMI0(13), CMI0(14), CMI0(15),
-#endif
-#if NR_CPUS > 16
- CMI0(16), CMI0(17), CMI0(18), CMI0(19),
- CMI0(20), CMI0(21), CMI0(22), CMI0(23),
- CMI0(24), CMI0(25), CMI0(26), CMI0(27),
- CMI0(28), CMI0(29), CMI0(30), CMI0(31),
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+
+ MASK_DECLARE_8(0), MASK_DECLARE_8(8),
+ MASK_DECLARE_8(16), MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+ MASK_DECLARE_8(32), MASK_DECLARE_8(40),
+ MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
-#if NR_CPUS > 32
-#if BITS_PER_LONG == 32
- CMI(32, 0), CMI(33, 0), CMI(34, 0), CMI(35, 0),
- CMI(36, 0), CMI(37, 0), CMI(38, 0), CMI(39, 0),
- CMI(40, 0), CMI(41, 0), CMI(42, 0), CMI(43, 0),
- CMI(44, 0), CMI(45, 0), CMI(46, 0), CMI(47, 0),
- CMI(48, 0), CMI(49, 0), CMI(50, 0), CMI(51, 0),
- CMI(52, 0), CMI(53, 0), CMI(54, 0), CMI(55, 0),
- CMI(56, 0), CMI(57, 0), CMI(58, 0), CMI(59, 0),
- CMI(60, 0), CMI(61, 0), CMI(62, 0), CMI(63, 0),
+};
+EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
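+
+/*
+ * Sketch of the matching reader (as get_cpu_mask() in
+ * <linux/cpumask.h> implements cpumask_of()): the word containing
+ * bit @cpu sits in row 1 + cpu % BITS_PER_LONG; stepping the pointer
+ * back cpu / BITS_PER_LONG longs backs it into the zero rows so the
+ * set bit lands at bit position @cpu of the returned mask:
+ *
+ *   const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+ *   p -= cpu / BITS_PER_LONG;
+ *   return to_cpumask(p);
+ */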
+
+const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
+EXPORT_SYMBOL(cpu_all_bits);
+
+#ifdef CONFIG_INIT_ALL_POSSIBLE
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
+ = CPU_BITS_ALL;
#else
- CMI0(32), CMI0(33), CMI0(34), CMI0(35),
- CMI0(36), CMI0(37), CMI0(38), CMI0(39),
- CMI0(40), CMI0(41), CMI0(42), CMI0(43),
- CMI0(44), CMI0(45), CMI0(46), CMI0(47),
- CMI0(48), CMI0(49), CMI0(50), CMI0(51),
- CMI0(52), CMI0(53), CMI0(54), CMI0(55),
- CMI0(56), CMI0(57), CMI0(58), CMI0(59),
- CMI0(60), CMI0(61), CMI0(62), CMI0(63),
-#endif /* BITS_PER_LONG == 64 */
-#endif
-#if NR_CPUS > 64
- CMI64(64, Z64),
-#endif
-#if NR_CPUS > 128
- CMI64(128, Z64, Z64), CMI64(192, Z64, Z64, Z64),
-#endif
-#if NR_CPUS > 256
- CMI256(256, Z256),
-#endif
-#if NR_CPUS > 512
- CMI256(512, Z256, Z256), CMI256(768, Z256, Z256, Z256),
-#endif
-#if NR_CPUS > 1024
- CMI1024(1024, Z1024),
-#endif
-#if NR_CPUS > 2048
- CMI1024(2048, Z1024, Z1024), CMI1024(3072, Z1024, Z1024, Z1024),
-#endif
-#if NR_CPUS > 4096
-#error NR_CPUS too big. Fix initializers or set CONFIG_HAVE_CPUMASK_OF_CPU_MAP
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
-};
+const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
+EXPORT_SYMBOL(cpu_possible_mask);
+
+static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
+EXPORT_SYMBOL(cpu_online_mask);
+
+static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
+EXPORT_SYMBOL(cpu_present_mask);
+
+static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
+EXPORT_SYMBOL(cpu_active_mask);
+
+void set_cpu_possible(unsigned int cpu, bool possible)
+{
+ if (possible)
+ cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
+ else
+ cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
+}
+
+void set_cpu_present(unsigned int cpu, bool present)
+{
+ if (present)
+ cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
+ else
+ cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
+}
+
+void set_cpu_online(unsigned int cpu, bool online)
+{
+ if (online)
+ cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
+ else
+ cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
+}
+
+void set_cpu_active(unsigned int cpu, bool active)
+{
+ if (active)
+ cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
+ else
+ cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
+}
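+
+/* The masks above are exported only as const pointers, so arch and
+ * core code must go through these setters, e.g. set_cpu_online(cpu,
+ * true) on the bring-up path, rather than writing the bitmaps
+ * directly. */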
+
+void init_cpu_present(const struct cpumask *src)
+{
+ cpumask_copy(to_cpumask(cpu_present_bits), src);
+}
+
+void init_cpu_possible(const struct cpumask *src)
+{
+ cpumask_copy(to_cpumask(cpu_possible_bits), src);
+}
-const cpumask_t *cpumask_of_cpu_map = cpumask_map;
-#endif /* !CONFIG_HAVE_CPUMASK_OF_CPU_MAP */
+void init_cpu_online(const struct cpumask *src)
+{
+ cpumask_copy(to_cpumask(cpu_online_bits), src);
+}