/* drivers/staging/dst/thread_pool.c — DST (distributed storage) thread pool */
1 /*
2  * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/dst.h>
18 #include <linux/kthread.h>
19 #include <linux/slab.h>
20
/*
 * Thread pool abstraction: schedules work to be performed by dedicated
 * kernel threads. Users never manipulate the threads directly; instead
 * they provide setup and cleanup callbacks for the pool itself, plus
 * action and cleanup callbacks for each piece of submitted work.
 *
 * Each worker carries private data initialized at creation time, plus
 * data provided by the user at scheduling time.
 *
 * While an action is being performed, its thread cannot be used by
 * other users; they sleep until a free thread can pick up their work.
 */
/*
 * Per-thread state of one pool worker. Lifetime: allocated in
 * thread_pool_add_worker(), freed in thread_pool_exit_worker().
 * List membership and the counters are protected by the owning
 * pool's thread_lock.
 */
struct thread_pool_worker {
	/* Links the worker into pool->ready_list or pool->active_list. */
	struct list_head	worker_entry;

	/* Kernel thread running thread_pool_worker_func(). */
	struct task_struct	*thread;

	/* Owning pool (used to reach its lock, lists and waitqueue). */
	struct thread_pool	*pool;

	/* NOTE(review): never written in this file — looks unused. */
	int			error;
	/* Nonzero while schedule_data/action hold a pending work item. */
	int			has_data;
	/* Set by thread_pool_del_worker_id(): exit after current work. */
	int			need_exit;
	/* User-assigned ID, matched by thread_pool_del_worker_id(). */
	unsigned int		id;

	/* The worker sleeps here until work arrives or it is stopped. */
	wait_queue_head_t	wait;

	/* Per-worker data returned by the user's init() callback. */
	void			*private;
	/* Per-work data supplied by the user at scheduling time. */
	void			*schedule_data;

	/* Work callback (set per scheduled item). */
	int			(*action)(void *private, void *schedule_data);
	/* Releases @private when the worker is destroyed. */
	void			(*cleanup)(void *private);
};
53
54 static void thread_pool_exit_worker(struct thread_pool_worker *w)
55 {
56         kthread_stop(w->thread);
57
58         w->cleanup(w->private);
59         kfree(w);
60 }
61
62 /*
63  * Called to mark thread as ready and allow users to schedule new work.
64  */
65 static void thread_pool_worker_make_ready(struct thread_pool_worker *w)
66 {
67         struct thread_pool *p = w->pool;
68
69         mutex_lock(&p->thread_lock);
70
71         if (!w->need_exit) {
72                 list_move_tail(&w->worker_entry, &p->ready_list);
73                 w->has_data = 0;
74                 mutex_unlock(&p->thread_lock);
75
76                 wake_up(&p->wait);
77         } else {
78                 p->thread_num--;
79                 list_del(&w->worker_entry);
80                 mutex_unlock(&p->thread_lock);
81
82                 thread_pool_exit_worker(w);
83         }
84 }
85
86 /*
87  * Thread action loop: waits until there is new work.
88  */
89 static int thread_pool_worker_func(void *data)
90 {
91         struct thread_pool_worker *w = data;
92
93         while (!kthread_should_stop()) {
94                 wait_event_interruptible(w->wait,
95                         kthread_should_stop() || w->has_data);
96
97                 if (kthread_should_stop())
98                         break;
99
100                 if (!w->has_data)
101                         continue;
102
103                 w->action(w->private, w->schedule_data);
104                 thread_pool_worker_make_ready(w);
105         }
106
107         return 0;
108 }
109
110 /*
111  * Remove single worker without specifying which one.
112  */
/*
 * Remove and destroy one worker, whichever becomes ready first.
 * Sleeps (uninterruptibly) until a worker appears on the ready list or
 * the pool runs out of threads entirely; a busy worker is only reaped
 * after it finishes its current action and returns to the ready list.
 */
void thread_pool_del_worker(struct thread_pool *p)
{
	struct thread_pool_worker *w = NULL;

	while (!w && p->thread_num) {
		wait_event(p->wait, !list_empty(&p->ready_list) ||
				!p->thread_num);

		dprintk("%s: locking list_empty: %d, thread_num: %d.\n",
				__func__, list_empty(&p->ready_list),
				p->thread_num);

		mutex_lock(&p->thread_lock);
		if (!list_empty(&p->ready_list)) {
			w = list_first_entry(&p->ready_list,
					struct thread_pool_worker,
					worker_entry);

			dprintk("%s: deleting w: %p, thread_num: %d, "
					"list: %p [%p.%p].\n", __func__,
					w, p->thread_num, &p->ready_list,
					p->ready_list.prev, p->ready_list.next);

			/* Unlink under the lock; the ready list may have been
			 * emptied by a scheduler between wakeup and locking,
			 * in which case we loop and wait again. */
			p->thread_num--;
			list_del(&w->worker_entry);
		}
		mutex_unlock(&p->thread_lock);
	}

	/* Stop the kthread outside the lock — kthread_stop() blocks. */
	if (w)
		thread_pool_exit_worker(w);
	dprintk("%s: deleted w: %p, thread_num: %d.\n",
			__func__, w, p->thread_num);
}
147
148 /*
149  * Remove a worker with given ID.
150  */
151 void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id)
152 {
153         struct thread_pool_worker *w;
154         int found = 0;
155
156         mutex_lock(&p->thread_lock);
157         list_for_each_entry(w, &p->ready_list, worker_entry) {
158                 if (w->id == id) {
159                         found = 1;
160                         p->thread_num--;
161                         list_del(&w->worker_entry);
162                         break;
163                 }
164         }
165
166         if (!found) {
167                 list_for_each_entry(w, &p->active_list, worker_entry) {
168                         if (w->id == id) {
169                                 w->need_exit = 1;
170                                 break;
171                         }
172                 }
173         }
174         mutex_unlock(&p->thread_lock);
175
176         if (found)
177                 thread_pool_exit_worker(w);
178 }
179
180 /*
181  * Add new worker thread with given parameters.
182  * If initialization callback fails, return error.
183  */
184 int thread_pool_add_worker(struct thread_pool *p,
185                 char *name,
186                 unsigned int id,
187                 void *(*init)(void *private),
188                 void (*cleanup)(void *private),
189                 void *private)
190 {
191         struct thread_pool_worker *w;
192         int err = -ENOMEM;
193
194         w = kzalloc(sizeof(struct thread_pool_worker), GFP_KERNEL);
195         if (!w)
196                 goto err_out_exit;
197
198         w->pool = p;
199         init_waitqueue_head(&w->wait);
200         w->cleanup = cleanup;
201         w->id = id;
202
203         w->thread = kthread_run(thread_pool_worker_func, w, "%s", name);
204         if (IS_ERR(w->thread)) {
205                 err = PTR_ERR(w->thread);
206                 goto err_out_free;
207         }
208
209         w->private = init(private);
210         if (IS_ERR(w->private)) {
211                 err = PTR_ERR(w->private);
212                 goto err_out_stop_thread;
213         }
214
215         mutex_lock(&p->thread_lock);
216         list_add_tail(&w->worker_entry, &p->ready_list);
217         p->thread_num++;
218         mutex_unlock(&p->thread_lock);
219
220         return 0;
221
222 err_out_stop_thread:
223         kthread_stop(w->thread);
224 err_out_free:
225         kfree(w);
226 err_out_exit:
227         return err;
228 }
229
230 /*
231  * Destroy the whole pool.
232  */
/*
 * Destroy the whole pool: reap workers one at a time until none
 * remain, then free the pool structure itself. Blocks until every
 * worker has finished its current action and been stopped.
 */
void thread_pool_destroy(struct thread_pool *p)
{
	while (p->thread_num) {
		dprintk("%s: num: %d.\n", __func__, p->thread_num);
		thread_pool_del_worker(p);
	}

	kfree(p);
}
242
243 /*
244  * Create a pool with given number of threads.
245  * They will have sequential IDs started from zero.
246  */
247 struct thread_pool *thread_pool_create(int num, char *name,
248                 void *(*init)(void *private),
249                 void (*cleanup)(void *private),
250                 void *private)
251 {
252         struct thread_pool_worker *w, *tmp;
253         struct thread_pool *p;
254         int err = -ENOMEM;
255         int i;
256
257         p = kzalloc(sizeof(struct thread_pool), GFP_KERNEL);
258         if (!p)
259                 goto err_out_exit;
260
261         init_waitqueue_head(&p->wait);
262         mutex_init(&p->thread_lock);
263         INIT_LIST_HEAD(&p->ready_list);
264         INIT_LIST_HEAD(&p->active_list);
265         p->thread_num = 0;
266
267         for (i = 0; i < num; ++i) {
268                 err = thread_pool_add_worker(p, name, i, init,
269                                 cleanup, private);
270                 if (err)
271                         goto err_out_free_all;
272         }
273
274         return p;
275
276 err_out_free_all:
277         list_for_each_entry_safe(w, tmp, &p->ready_list, worker_entry) {
278                 list_del(&w->worker_entry);
279                 thread_pool_exit_worker(w);
280         }
281         kfree(p);
282 err_out_exit:
283         return ERR_PTR(err);
284 }
285
286 /*
287  * Schedule execution of the action on a given thread,
288  * provided ID pointer has to match previously stored
289  * private data.
290  */
291 int thread_pool_schedule_private(struct thread_pool *p,
292                 int (*setup)(void *private, void *data),
293                 int (*action)(void *private, void *data),
294                 void *data, long timeout, void *id)
295 {
296         struct thread_pool_worker *w, *tmp, *worker = NULL;
297         int err = 0;
298
299         while (!worker && !err) {
300                 timeout = wait_event_interruptible_timeout(p->wait,
301                                 !list_empty(&p->ready_list),
302                                 timeout);
303
304                 if (!timeout) {
305                         err = -ETIMEDOUT;
306                         break;
307                 }
308
309                 worker = NULL;
310                 mutex_lock(&p->thread_lock);
311                 list_for_each_entry_safe(w, tmp, &p->ready_list, worker_entry) {
312                         if (id && id != w->private)
313                                 continue;
314
315                         worker = w;
316
317                         list_move_tail(&w->worker_entry, &p->active_list);
318
319                         err = setup(w->private, data);
320                         if (!err) {
321                                 w->schedule_data = data;
322                                 w->action = action;
323                                 w->has_data = 1;
324                                 wake_up(&w->wait);
325                         } else {
326                                 list_move_tail(&w->worker_entry,
327                                                 &p->ready_list);
328                         }
329
330                         break;
331                 }
332                 mutex_unlock(&p->thread_lock);
333         }
334
335         return err;
336 }
337
338 /*
339  * Schedule execution on arbitrary thread from the pool.
340  */
341 int thread_pool_schedule(struct thread_pool *p,
342                 int (*setup)(void *private, void *data),
343                 int (*action)(void *private, void *data),
344                 void *data, long timeout)
345 {
346         return thread_pool_schedule_private(p, setup,
347                         action, data, timeout, NULL);
348 }