async: Handle kthread_run() return codes.
[safe/jmp/linux-2.6] / kernel / async.c
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running the various independent hardware delay and discovery operations
decoupled from each other rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order
(not unlike how out-of-order CPUs retire their instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function makes sure that all asynchronous
operations that were scheduled prior to the operation corresponding to the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel. (An illustrative usage
sketch follows this comment block.)

*/
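
/*
 * Illustrative usage sketch (not part of the original file): how a driver
 * might combine async_schedule(), async_synchronize_cookie() and
 * async_synchronize_full() as described above.  All my_* identifiers below
 * are made up purely for illustration.
 *
 *	static void my_probe_one(void *data, async_cookie_t cookie)
 *	{
 *		struct my_device *dev = data;
 *
 *		my_slow_hardware_discovery(dev);   (may run out of order)
 *
 *		(wait for every probe scheduled before this one, so the
 *		 globally visible registration below happens in order)
 *		async_synchronize_cookie(cookie);
 *		my_register_device_numbers(dev);
 *	}
 *
 *	static int __init my_driver_init(void)
 *	{
 *		struct my_device *dev;
 *
 *		list_for_each_entry(dev, &my_device_list, node)
 *			async_schedule(my_probe_one, dev);
 *
 *		(this init path shares global resources with non-async code,
 *		 so wait for all of the above before returning)
 *		async_synchronize_full();
 *		return 0;
 *	}
 */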

#include <linux/async.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

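/*
 * Limits on the worker pool and the queued work: at most MAX_THREADS worker
 * threads are spawned, and once more than MAX_WORK entries are pending,
 * newly scheduled calls are run synchronously instead (see __async_schedule()).
 */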
#define MAX_THREADS     256
#define MAX_WORK        32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

static int async_enabled = 0;

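/*
 * One queued asynchronous function call.  @running points at the list the
 * entry is moved to while it executes: the global async_running list for
 * async_schedule(), or a caller-supplied list for the _special() variants.
 */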
struct async_entry {
        struct list_head list;
        async_cookie_t   cookie;
        async_func_ptr   *func;
        void             *data;
        struct list_head *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);
static DECLARE_WAIT_QUEUE_HEAD(async_new);

static atomic_t entry_count;
static atomic_t thread_count;

extern int initcall_debug;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
        struct async_entry *entry;
        if (!list_empty(running)) {
                entry = list_first_entry(running,
                        struct async_entry, list);
                return entry->cookie;
        } else if (!list_empty(&async_pending)) {
                entry = list_first_entry(&async_pending,
                        struct async_entry, list);
                return entry->cookie;
        } else {
                /* nothing in progress... next_cookie is "infinity" */
                return next_cookie;
        }
}

static async_cookie_t lowest_in_progress(struct list_head *running)
{
        unsigned long flags;
        async_cookie_t ret;

        spin_lock_irqsave(&async_lock, flags);
        ret = __lowest_in_progress(running);
        spin_unlock_irqrestore(&async_lock, flags);
        return ret;
}

/*
 * pick the first pending entry and run it
 */
static void run_one_entry(void)
{
        unsigned long flags;
        struct async_entry *entry;
        ktime_t calltime, delta, rettime;

        /* 1) pick one task from the pending queue */

        spin_lock_irqsave(&async_lock, flags);
        if (list_empty(&async_pending))
                goto out;
        entry = list_first_entry(&async_pending, struct async_entry, list);

        /* 2) move it to the running queue */
        list_del(&entry->list);
        list_add_tail(&entry->list, entry->running);
        spin_unlock_irqrestore(&async_lock, flags);

        /* 3) run it (and print duration) */
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk("calling  %lli_%pF @ %i\n", (long long)entry->cookie,
                        entry->func, task_pid_nr(current));
                calltime = ktime_get();
        }
        entry->func(entry->data, entry->cookie);
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk("initcall %lli_%pF returned 0 after %lld usecs\n",
                        (long long)entry->cookie,
                        entry->func,
                        (long long)ktime_to_ns(delta) >> 10);
        }

        /* 4) remove it from the running queue */
        spin_lock_irqsave(&async_lock, flags);
        list_del(&entry->list);

        /* 5) free the entry */
        kfree(entry);
        atomic_dec(&entry_count);

        spin_unlock_irqrestore(&async_lock, flags);

        /* 6) wake up any waiters. */
        wake_up(&async_done);
        return;

out:
        spin_unlock_irqrestore(&async_lock, flags);
}


static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
        struct async_entry *entry;
        unsigned long flags;
        async_cookie_t newcookie;

        /* allow irq-off callers */
        entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

        /*
         * If we're out of memory or if there's too much work
         * pending already, we execute synchronously.
         */
        if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
                kfree(entry);
                spin_lock_irqsave(&async_lock, flags);
                newcookie = next_cookie++;
                spin_unlock_irqrestore(&async_lock, flags);

                /* async disabled, low on memory, or too busy: run synchronously */
                ptr(data, newcookie);
                return newcookie;
        }
        entry->func = ptr;
        entry->data = data;
        entry->running = running;

        spin_lock_irqsave(&async_lock, flags);
        newcookie = entry->cookie = next_cookie++;
        list_add_tail(&entry->list, &async_pending);
        atomic_inc(&entry_count);
        spin_unlock_irqrestore(&async_lock, flags);
        wake_up(&async_new);
        return newcookie;
}

/*
 * async_schedule - schedule @ptr for asynchronous execution on the shared
 * running list; returns the sequence cookie assigned to the call.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
        return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/*
 * async_schedule_special - like async_schedule(), but uses a caller-supplied
 * running list so the caller can synchronize against its own domain only.
 */
async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
{
        return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_special);

/*
 * async_synchronize_full - wait until all asynchronous function calls
 * scheduled so far have completed.
 */
void async_synchronize_full(void)
{
        do {
                async_synchronize_cookie(next_cookie);
        } while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/*
 * async_synchronize_full_special - same as async_synchronize_full(), but
 * for a caller-supplied running list.
 */
void async_synchronize_full_special(struct list_head *list)
{
        async_synchronize_cookie_special(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_special);

/*
 * async_synchronize_cookie_special - wait until all asynchronous calls on
 * @running that were scheduled prior to @cookie have completed.
 */
void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
{
        ktime_t starttime, delta, endtime;

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk("async_waiting @ %i\n", task_pid_nr(current));
                starttime = ktime_get();
        }

        wait_event(async_done, lowest_in_progress(running) >= cookie);

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                endtime = ktime_get();
                delta = ktime_sub(endtime, starttime);

                printk("async_continuing @ %i after %lli usec\n",
                        task_pid_nr(current),
                        (long long)ktime_to_ns(delta) >> 10);
        }
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);

/*
 * async_synchronize_cookie - as above, but against the shared running list.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
        async_synchronize_cookie_special(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

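/*
 * async_thread - worker thread: runs pending entries via run_one_entry().
 * When it has been idle for a full HZ worth of jiffies it signs off and
 * exits, unless new work raced in while it was about to leave.
 */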
static int async_thread(void *unused)
{
        DECLARE_WAITQUEUE(wq, current);
        add_wait_queue(&async_new, &wq);

        while (!kthread_should_stop()) {
                int ret = HZ;
                set_current_state(TASK_INTERRUPTIBLE);
                /*
                 * check the list head without lock.. false positives
                 * are dealt with inside run_one_entry() while holding
                 * the lock.
                 */
                rmb();
                if (!list_empty(&async_pending))
                        run_one_entry();
                else
                        ret = schedule_timeout(HZ);

                if (ret == 0) {
                        /*
                         * we timed out; this means we as a thread are
                         * redundant.  we sign off and die, but to avoid any
                         * races there is a last-straw check to see if work
                         * snuck in.
                         */
                        atomic_dec(&thread_count);
                        wmb(); /* manager must see our departure first */
                        if (list_empty(&async_pending))
                                break;
                        /*
                         * whoops, work came in between us timing out and us
                         * signing off; we need to stay alive and keep working.
                         */
                        atomic_inc(&thread_count);
                }
        }
        remove_wait_queue(&async_new, &wq);

        return 0;
}

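/*
 * async_manager_thread - watches the amount of pending work and spawns
 * additional async_thread() workers (up to MAX_THREADS) whenever there are
 * more queued entries than live worker threads.  If kthread_run() fails,
 * it backs off for 100ms and retries.
 */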
static int async_manager_thread(void *unused)
{
        DECLARE_WAITQUEUE(wq, current);
        add_wait_queue(&async_new, &wq);

        while (!kthread_should_stop()) {
                int tc, ec;

                set_current_state(TASK_INTERRUPTIBLE);

                tc = atomic_read(&thread_count);
                rmb();
                ec = atomic_read(&entry_count);

                while (tc < ec && tc < MAX_THREADS) {
                        if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
                                               tc))) {
                                msleep(100);
                                continue;
                        }
                        atomic_inc(&thread_count);
                        tc++;
                }

                schedule();
        }
        remove_wait_queue(&async_new, &wq);

        return 0;
}

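/*
 * async_init - start the manager thread.  Async execution is only enabled
 * when "fastboot" was given on the command line; if the manager thread
 * cannot be created, asynchronous calls fall back to synchronous execution.
 */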
static int __init async_init(void)
{
        if (async_enabled)
                if (IS_ERR(kthread_run(async_manager_thread, NULL,
                                       "async/mgr")))
                        async_enabled = 0;
        return 0;
}

static int __init setup_async(char *str)
{
        async_enabled = 1;
        return 1;
}

__setup("fastboot", setup_async);


core_initcall(async_init);