stop_machine: add ALL_CPUS option
kernel/stop_machine.c
/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

/* Since we affect priority and affinity (both of which are visible
 * to, and settable by outside processes) we do indirection via a
 * kthread. */

/* Thread to stop each CPU in user context. */
enum stopmachine_state {
	STOPMACHINE_WAIT,
	STOPMACHINE_PREPARE,
	STOPMACHINE_DISABLE_IRQ,
	STOPMACHINE_RUN,
	STOPMACHINE_EXIT,
};
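/*
 * The coordinating kstopmachine thread (do_stop) drives every stopmachine()
 * thread through these states in order: WAIT (threads starting) ->
 * PREPARE (preempt disabled) -> DISABLE_IRQ (irqs disabled, machine stopped)
 * -> RUN (ALL_CPUS only: each thread calls smdata.fn) -> EXIT.
 */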

/* Shared with the stopmachine() threads: filled in under stopmachine_mutex
 * before the threads are created.  When run_all is set, every thread calls
 * fn(data) once the machine is stopped. */
static struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	struct completion done;
	int run_all;
} smdata;

static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;

static int stopmachine(void *cpu)
{
	int irqs_disabled = 0;
	int prepared = 0;
	int ran = 0;
	cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);

	set_cpus_allowed_ptr(current, cpumask);

	/* Ack: we are alive */
	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
	atomic_inc(&stopmachine_thread_ack);

	/* Simple state machine */
	while (stopmachine_state != STOPMACHINE_EXIT) {
		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
		    && !irqs_disabled) {
			local_irq_disable();
			hard_irq_disable();
			irqs_disabled = 1;
			/* Ack: irqs disabled. */
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		} else if (stopmachine_state == STOPMACHINE_PREPARE
			   && !prepared) {
			/* Everyone is in place, hold CPU. */
			preempt_disable();
			prepared = 1;
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		} else if (stopmachine_state == STOPMACHINE_RUN && !ran) {
			smdata.fn(smdata.data);
			ran = 1;
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		}
		/* Yield in first stage: migration threads need to
		 * help our sisters onto their CPUs. */
		if (!prepared && !irqs_disabled)
			yield();
		cpu_relax();
	}

	/* Ack: we are exiting. */
	smp_mb(); /* Must read state first. */
	atomic_inc(&stopmachine_thread_ack);

	if (irqs_disabled)
		local_irq_enable();
	if (prepared)
		preempt_enable();

	return 0;
}

/* Change the thread state and wait for every thread to ack it. */
static void stopmachine_set_state(enum stopmachine_state state)
{
	atomic_set(&stopmachine_thread_ack, 0);
	smp_wmb();
	stopmachine_state = state;
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		cpu_relax();
}

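/*
 * Start one kstopmachine thread per other online CPU and walk them up to
 * STOPMACHINE_DISABLE_IRQ, disabling preemption and irqs locally as well.
 * Returns 0 with the machine stopped, or a negative errno if a thread
 * could not be created.
 */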
static int stop_machine(void)
{
	int i, ret = 0;

	atomic_set(&stopmachine_thread_ack, 0);
	stopmachine_num_threads = 0;
	stopmachine_state = STOPMACHINE_WAIT;

	for_each_online_cpu(i) {
		if (i == raw_smp_processor_id())
			continue;
		ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
		if (ret < 0)
			break;
		stopmachine_num_threads++;
	}

	/* Wait for them all to come to life. */
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
		yield();
		cpu_relax();
	}

	/* If some failed, kill them all. */
	if (ret < 0) {
		stopmachine_set_state(STOPMACHINE_EXIT);
		return ret;
	}

	/* Now they are all started, make them hold the CPUs, ready. */
	preempt_disable();
	stopmachine_set_state(STOPMACHINE_PREPARE);

	/* Make them disable irqs. */
	local_irq_disable();
	hard_irq_disable();
	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

	return 0;
}

static void restart_machine(void)
{
	stopmachine_set_state(STOPMACHINE_EXIT);
	local_irq_enable();
	preempt_enable_no_resched();
}

static void run_other_cpus(void)
{
	stopmachine_set_state(STOPMACHINE_RUN);
}

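/*
 * Body of the coordinating kstopmachine thread: stop the machine, run fn on
 * this CPU, optionally (ALL_CPUS) let every other CPU run fn too, then let
 * the machine restart.  Sticks around afterwards so the caller can reap the
 * return value with kthread_stop().
 */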
static int do_stop(void *_smdata)
{
	struct stop_machine_data *smdata = _smdata;
	int ret;

	ret = stop_machine();
	if (ret == 0) {
		ret = smdata->fn(smdata->data);
		if (smdata->run_all)
			run_other_cpus();
		restart_machine();
	}

	/* We're done: you can kthread_stop us now */
	complete(&smdata->done);

	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}

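/*
 * Create the coordinating "kstopmachine" thread, bound to @cpu (or to the
 * current CPU for NR_CPUS / ALL_CPUS), and wait until fn has run with the
 * machine stopped.  The caller must pin CPU hotplug, as stop_machine_run()
 * does with get_online_cpus().
 */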
struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu)
{
	static DEFINE_MUTEX(stopmachine_mutex);
	struct task_struct *p;

	mutex_lock(&stopmachine_mutex);

	smdata.fn = fn;
	smdata.data = data;
	smdata.run_all = (cpu == ALL_CPUS) ? 1 : 0;
	init_completion(&smdata.done);

	smp_wmb(); /* make sure other cpus see smdata updates */

	/* If they don't care which CPU fn runs on, bind to any online one. */
	if (cpu == NR_CPUS || cpu == ALL_CPUS)
		cpu = raw_smp_processor_id();

	p = kthread_create(do_stop, &smdata, "kstopmachine");
	if (!IS_ERR(p)) {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		/* One high-prio thread per cpu.  We'll do this one. */
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_bind(p, cpu);
		wake_up_process(p);
		wait_for_completion(&smdata.done);
	}
	mutex_unlock(&stopmachine_mutex);
	return p;
}

int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
	struct task_struct *p;
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	p = __stop_machine_run(fn, data, cpu);
	if (!IS_ERR(p))
		ret = kthread_stop(p);
	else
		ret = PTR_ERR(p);
	put_online_cpus();

	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine_run);
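
A minimal caller-side sketch of the new option (not part of this file): with ALL_CPUS, fn runs on every online CPU with interrupts disabled and nothing else executing; ALL_CPUS is the magic cpu value declared alongside stop_machine_run() in include/linux/stop_machine.h, and only the coordinating CPU's return value is reported back. The callback and caller names below are hypothetical, for illustration only.

	/* Hypothetical callback: runs with irqs off while the machine is stopped. */
	static int example_sync_state(void *unused)
	{
		return 0;
	}

	static int example_caller(void)
	{
		/* Run on one unspecified CPU, as before this change. */
		int err = stop_machine_run(example_sync_state, NULL, NR_CPUS);

		/* New with this change: run on every online CPU. */
		if (!err)
			err = stop_machine_run(example_sync_state, NULL, ALL_CPUS);
		return err;
	}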