implement flush_work()
kernel/workqueue.c
1 /*
2  * linux/kernel/workqueue.c
3  *
4  * Generic mechanism for defining kernel helper threads for running
5  * arbitrary tasks in process context.
6  *
7  * Started by Ingo Molnar, Copyright (C) 2002
8  *
9  * Derived from the taskqueue/keventd code by:
10  *
11  *   David Woodhouse <dwmw2@infradead.org>
12  *   Andrew Morton <andrewm@uow.edu.au>
13  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
14  *   Theodore Ts'o <tytso@mit.edu>
15  *
16  * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
17  */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/signal.h>
24 #include <linux/completion.h>
25 #include <linux/workqueue.h>
26 #include <linux/slab.h>
27 #include <linux/cpu.h>
28 #include <linux/notifier.h>
29 #include <linux/kthread.h>
30 #include <linux/hardirq.h>
31 #include <linux/mempolicy.h>
32 #include <linux/freezer.h>
33 #include <linux/kallsyms.h>
34 #include <linux/debug_locks.h>
35
36 /*
37  * The per-CPU workqueue (if single thread, we always use the first
38  * possible cpu).
39  */
40 struct cpu_workqueue_struct {
41
42         spinlock_t lock;
43
44         struct list_head worklist;
45         wait_queue_head_t more_work;
46
47         struct workqueue_struct *wq;
48         struct task_struct *thread;
49         struct work_struct *current_work;
50
51         int run_depth;          /* Detect run_workqueue() recursion depth */
52
53         int freezeable;         /* Freeze the thread during suspend */
54 } ____cacheline_aligned;
55
56 /*
57  * The externally visible workqueue abstraction is an array of
58  * per-CPU workqueues:
59  */
60 struct workqueue_struct {
61         struct cpu_workqueue_struct *cpu_wq;
62         const char *name;
63         struct list_head list;  /* Empty if single thread */
64 };
65
66 /* All the per-cpu workqueues on the system, so that cpu hotplug can
67    add/remove threads to each one as cpus come/go. */
68 static DEFINE_MUTEX(workqueue_mutex);
69 static LIST_HEAD(workqueues);
70
71 static int singlethread_cpu;
72
73 /* If it's single threaded, it isn't in the list of workqueues. */
74 static inline int is_single_threaded(struct workqueue_struct *wq)
75 {
76         return list_empty(&wq->list);
77 }
78
79 /*
80  * Set the workqueue on which a work item is to be run
81  * - Must *only* be called if the pending flag is set
82  */
83 static inline void set_wq_data(struct work_struct *work, void *wq)
84 {
85         unsigned long new;
86
87         BUG_ON(!work_pending(work));
88
89         new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
90         new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
91         atomic_long_set(&work->data, new);
92 }
93
94 static inline void *get_wq_data(struct work_struct *work)
95 {
96         return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
97 }
98
99 static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
100 {
101         int ret = 0;
102         unsigned long flags;
103
104         spin_lock_irqsave(&cwq->lock, flags);
105         /*
106          * We need to re-validate the work info after we've gotten
107          * the cpu_workqueue lock. We can run the work now iff:
108          *
109          *  - the wq_data still matches the cpu_workqueue_struct
110          *  - AND the work is still marked pending
111          *  - AND the work is still on a list (which will be this
112          *    workqueue_struct list)
113          *
114          * All these conditions are important, because we
115          * need to protect against the work being run right
116          * now on another CPU (all but the last one might be
117          * true if it's currently running and has not been
118          * released yet, for example).
119          */
120         if (get_wq_data(work) == cwq
121             && work_pending(work)
122             && !list_empty(&work->entry)) {
123                 work_func_t f = work->func;
124                 cwq->current_work = work;
125                 list_del_init(&work->entry);
126                 spin_unlock_irqrestore(&cwq->lock, flags);
127
128                 if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
129                         work_release(work);
130                 f(work);
131
132                 spin_lock_irqsave(&cwq->lock, flags);
133                 cwq->current_work = NULL;
134                 ret = 1;
135         }
136         spin_unlock_irqrestore(&cwq->lock, flags);
137         return ret;
138 }
139
140 /**
141  * run_scheduled_work - run scheduled work synchronously
142  * @work: work to run
143  *
144  * This checks if the work was pending, and runs it
145  * synchronously if so. It returns a boolean to indicate
146  * whether it had any scheduled work to run or not.
147  *
148  * NOTE! This _only_ works for normal work_structs. You
149  * CANNOT use this for delayed work, because the wq data
150  * for delayed work will not point properly to the per-
151  * CPU workqueue struct, but will change!
152  */
153 int fastcall run_scheduled_work(struct work_struct *work)
154 {
155         for (;;) {
156                 struct cpu_workqueue_struct *cwq;
157
158                 if (!work_pending(work))
159                         return 0;
160                 if (list_empty(&work->entry))
161                         return 0;
162                 /* NOTE! This depends intimately on __queue_work! */
163                 cwq = get_wq_data(work);
164                 if (!cwq)
165                         return 0;
166                 if (__run_work(cwq, work))
167                         return 1;
168         }
169 }
170 EXPORT_SYMBOL(run_scheduled_work);
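/*
 * Usage sketch (illustrative; the "my_work" names are hypothetical): a
 * caller that wants still-pending work run right now, in its own context,
 * rather than waiting for the worker thread.  As noted above, this must
 * not be used for delayed work.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		do_the_deferred_processing();
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	ran = run_scheduled_work(&my_work);
 *	(ran is 1 if the handler was executed here, 0 if nothing was pending)
 */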
171
172 static void insert_work(struct cpu_workqueue_struct *cwq,
173                                 struct work_struct *work, int tail)
174 {
175         set_wq_data(work, cwq);
176         if (tail)
177                 list_add_tail(&work->entry, &cwq->worklist);
178         else
179                 list_add(&work->entry, &cwq->worklist);
180         wake_up(&cwq->more_work);
181 }
182
183 /* Preempt must be disabled. */
184 static void __queue_work(struct cpu_workqueue_struct *cwq,
185                          struct work_struct *work)
186 {
187         unsigned long flags;
188
189         spin_lock_irqsave(&cwq->lock, flags);
190         insert_work(cwq, work, 1);
191         spin_unlock_irqrestore(&cwq->lock, flags);
192 }
193
194 /**
195  * queue_work - queue work on a workqueue
196  * @wq: workqueue to use
197  * @work: work to queue
198  *
199  * Returns 0 if @work was already on a queue, non-zero otherwise.
200  *
201  * We queue the work to the CPU on which it was submitted, but there is
202  * no guarantee that it will be processed by that CPU.
203  */
204 int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
205 {
206         int ret = 0, cpu = get_cpu();
207
208         if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
209                 if (unlikely(is_single_threaded(wq)))
210                         cpu = singlethread_cpu;
211                 BUG_ON(!list_empty(&work->entry));
212                 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
213                 ret = 1;
214         }
215         put_cpu();
216         return ret;
217 }
218 EXPORT_SYMBOL_GPL(queue_work);
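/*
 * Usage sketch (illustrative; "my_wq" and "my_work_fn" are hypothetical): a
 * driver typically initialises the work once and queues it from atomic
 * context (e.g. an interrupt handler); the handler then runs in process
 * context on the workqueue thread.
 *
 *	static struct workqueue_struct *my_wq;
 *	static struct work_struct my_work;
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		(runs in process context, may sleep)
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		my_wq = create_workqueue("my_wq");
 *		if (!my_wq)
 *			return -ENOMEM;
 *		INIT_WORK(&my_work, my_work_fn);
 *		queue_work(my_wq, &my_work);
 *		return 0;
 *	}
 */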
219
220 void delayed_work_timer_fn(unsigned long __data)
221 {
222         struct delayed_work *dwork = (struct delayed_work *)__data;
223         struct workqueue_struct *wq = get_wq_data(&dwork->work);
224         int cpu = smp_processor_id();
225
226         if (unlikely(is_single_threaded(wq)))
227                 cpu = singlethread_cpu;
228
229         __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
230 }
231
232 /**
233  * queue_delayed_work - queue work on a workqueue after delay
234  * @wq: workqueue to use
235  * @dwork: delayable work to queue
236  * @delay: number of jiffies to wait before queueing
237  *
238  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
239  */
240 int fastcall queue_delayed_work(struct workqueue_struct *wq,
241                         struct delayed_work *dwork, unsigned long delay)
242 {
243         int ret = 0;
244         struct timer_list *timer = &dwork->timer;
245         struct work_struct *work = &dwork->work;
246
247         timer_stats_timer_set_start_info(timer);
248         if (delay == 0)
249                 return queue_work(wq, work);
250
251         if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
252                 BUG_ON(timer_pending(timer));
253                 BUG_ON(!list_empty(&work->entry));
254
255                 /* This stores wq for the moment, for the timer_fn */
256                 set_wq_data(work, wq);
257                 timer->expires = jiffies + delay;
258                 timer->data = (unsigned long)dwork;
259                 timer->function = delayed_work_timer_fn;
260                 add_timer(timer);
261                 ret = 1;
262         }
263         return ret;
264 }
265 EXPORT_SYMBOL_GPL(queue_delayed_work);
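/*
 * Usage sketch (illustrative; names are hypothetical): a delayed_work embeds
 * the timer, so the caller only supplies the delay in jiffies.
 *
 *	static struct delayed_work my_dwork;
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		(runs roughly 100ms after being queued)
 *	}
 *
 *	INIT_DELAYED_WORK(&my_dwork, my_dwork_fn);
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 */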
266
267 /**
268  * queue_delayed_work_on - queue work on specific CPU after delay
269  * @cpu: CPU number to execute work on
270  * @wq: workqueue to use
271  * @dwork: work to queue
272  * @delay: number of jiffies to wait before queueing
273  *
274  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
275  */
276 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
277                         struct delayed_work *dwork, unsigned long delay)
278 {
279         int ret = 0;
280         struct timer_list *timer = &dwork->timer;
281         struct work_struct *work = &dwork->work;
282
283         if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
284                 BUG_ON(timer_pending(timer));
285                 BUG_ON(!list_empty(&work->entry));
286
287                 /* This stores wq for the moment, for the timer_fn */
288                 set_wq_data(work, wq);
289                 timer->expires = jiffies + delay;
290                 timer->data = (unsigned long)dwork;
291                 timer->function = delayed_work_timer_fn;
292                 add_timer_on(timer, cpu);
293                 ret = 1;
294         }
295         return ret;
296 }
297 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
298
299 static void run_workqueue(struct cpu_workqueue_struct *cwq)
300 {
301         unsigned long flags;
302
303         /*
304          * Keep taking work off the queue until it is
305          * empty.
306          */
307         spin_lock_irqsave(&cwq->lock, flags);
308         cwq->run_depth++;
309         if (cwq->run_depth > 3) {
310                 /* morton gets to eat his hat */
311                 printk("%s: recursion depth exceeded: %d\n",
312                         __FUNCTION__, cwq->run_depth);
313                 dump_stack();
314         }
315         while (!list_empty(&cwq->worklist)) {
316                 struct work_struct *work = list_entry(cwq->worklist.next,
317                                                 struct work_struct, entry);
318                 work_func_t f = work->func;
319
320                 cwq->current_work = work;
321                 list_del_init(cwq->worklist.next);
322                 spin_unlock_irqrestore(&cwq->lock, flags);
323
324                 BUG_ON(get_wq_data(work) != cwq);
325                 if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
326                         work_release(work);
327                 f(work);
328
329                 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
330                         printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
331                                         "%s/0x%08x/%d\n",
332                                         current->comm, preempt_count(),
333                                         current->pid);
334                         printk(KERN_ERR "    last function: ");
335                         print_symbol("%s\n", (unsigned long)f);
336                         debug_show_held_locks(current);
337                         dump_stack();
338                 }
339
340                 spin_lock_irqsave(&cwq->lock, flags);
341                 cwq->current_work = NULL;
342         }
343         cwq->run_depth--;
344         spin_unlock_irqrestore(&cwq->lock, flags);
345 }
346
347 static int worker_thread(void *__cwq)
348 {
349         struct cpu_workqueue_struct *cwq = __cwq;
350         DECLARE_WAITQUEUE(wait, current);
351         struct k_sigaction sa;
352         sigset_t blocked;
353
354         if (!cwq->freezeable)
355                 current->flags |= PF_NOFREEZE;
356
357         set_user_nice(current, -5);
358
359         /* Block and flush all signals */
360         sigfillset(&blocked);
361         sigprocmask(SIG_BLOCK, &blocked, NULL);
362         flush_signals(current);
363
364         /*
365          * We inherited MPOL_INTERLEAVE from the booting kernel.
366          * Set MPOL_DEFAULT to ensure node-local allocations.
367          */
368         numa_default_policy();
369
370         /* SIG_IGN makes children autoreap: see do_notify_parent(). */
371         sa.sa.sa_handler = SIG_IGN;
372         sa.sa.sa_flags = 0;
373         siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
374         do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
375
376         set_current_state(TASK_INTERRUPTIBLE);
377         while (!kthread_should_stop()) {
378                 if (cwq->freezeable)
379                         try_to_freeze();
380
381                 add_wait_queue(&cwq->more_work, &wait);
382                 if (list_empty(&cwq->worklist))
383                         schedule();
384                 else
385                         __set_current_state(TASK_RUNNING);
386                 remove_wait_queue(&cwq->more_work, &wait);
387
388                 if (!list_empty(&cwq->worklist))
389                         run_workqueue(cwq);
390                 set_current_state(TASK_INTERRUPTIBLE);
391         }
392         __set_current_state(TASK_RUNNING);
393         return 0;
394 }
395
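/*
 * A wq_barrier is a dummy work item whose only job is to complete "done"
 * when the worker thread reaches it; a flusher queues one and then waits,
 * which guarantees that everything inserted before the barrier has run.
 */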
396 struct wq_barrier {
397         struct work_struct      work;
398         struct completion       done;
399 };
400
401 static void wq_barrier_func(struct work_struct *work)
402 {
403         struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
404         complete(&barr->done);
405 }
406
407 static inline void init_wq_barrier(struct wq_barrier *barr)
408 {
409         INIT_WORK(&barr->work, wq_barrier_func);
410         __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
411
412         init_completion(&barr->done);
413 }
414
415 static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
416 {
417         if (cwq->thread == current) {
418                 /*
419                  * Probably keventd trying to flush its own queue. So simply run
420                  * it by hand rather than deadlocking.
421                  */
422                 mutex_unlock(&workqueue_mutex);
423                 run_workqueue(cwq);
424                 mutex_lock(&workqueue_mutex);
425         } else {
426                 struct wq_barrier barr;
427
428                 init_wq_barrier(&barr);
429                 __queue_work(cwq, &barr.work);
430
431                 mutex_unlock(&workqueue_mutex);
432                 wait_for_completion(&barr.done);
433                 mutex_lock(&workqueue_mutex);
434         }
435 }
436
437 /**
438  * flush_workqueue - ensure that any scheduled work has run to completion.
439  * @wq: workqueue to flush
440  *
441  * Forces execution of the workqueue and blocks until its completion.
442  * This is typically used in driver shutdown handlers.
443  *
444  * We sleep until all work items which were queued on entry have been
445  * handled, but we are not livelocked by new incoming ones.
446  *
447  * This function used to run the workqueues itself.  Now we just wait for the
448  * helper threads to do it.
449  */
450 void fastcall flush_workqueue(struct workqueue_struct *wq)
451 {
452         mutex_lock(&workqueue_mutex);
453         if (is_single_threaded(wq)) {
454                 /* Always use first cpu's area. */
455                 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
456         } else {
457                 int cpu;
458
459                 for_each_online_cpu(cpu)
460                         flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
461         }
462         mutex_unlock(&workqueue_mutex);
463 }
464 EXPORT_SYMBOL_GPL(flush_workqueue);
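/*
 * Usage sketch (illustrative): flush before freeing anything the queued
 * handlers might still touch, typically on module unload or device removal.
 *
 *	static void my_remove(void)
 *	{
 *		(nothing queues new work on my_wq past this point)
 *		flush_workqueue(my_wq);
 *		destroy_workqueue(my_wq);	(destroy also flushes, see below)
 *	}
 */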
465
466 static void wait_on_work(struct cpu_workqueue_struct *cwq,
467                                 struct work_struct *work)
468 {
469         struct wq_barrier barr;
470         int running = 0;
471
472         spin_lock_irq(&cwq->lock);
473         if (unlikely(cwq->current_work == work)) {
474                 init_wq_barrier(&barr);
475                 insert_work(cwq, &barr.work, 0);
476                 running = 1;
477         }
478         spin_unlock_irq(&cwq->lock);
479
480         if (unlikely(running)) {
481                 mutex_unlock(&workqueue_mutex);
482                 wait_for_completion(&barr.done);
483                 mutex_lock(&workqueue_mutex);
484         }
485 }
486
487 /**
488  * flush_work - block until a work_struct's callback has terminated
489  * @wq: the workqueue on which the work is queued
490  * @work: the work which is to be flushed
491  *
492  * flush_work() will attempt to cancel the work if it is queued.  If the work's
493  * callback appears to be running, flush_work() will block until it has
494  * completed.
495  *
496  * flush_work() is designed to be used when the caller is tearing down data
497  * structures which the callback function operates upon.  It is expected that,
498  * prior to calling flush_work(), the caller has arranged for the work to not
499  * be requeued.
500  */
501 void flush_work(struct workqueue_struct *wq, struct work_struct *work)
502 {
503         struct cpu_workqueue_struct *cwq;
504
505         mutex_lock(&workqueue_mutex);
506         cwq = get_wq_data(work);
507         /* Was it ever queued? */
508         if (!cwq)
509                 goto out;
510
511         /*
512          * This work can't be re-queued, and workqueue_mutex, taken above,
513          * protects us from take_over_work(), so there is no need to re-check
514          * that get_wq_data() is still the same when we take cwq->lock.
515          */
516         spin_lock_irq(&cwq->lock);
517         list_del_init(&work->entry);
518         work_release(work);
519         spin_unlock_irq(&cwq->lock);
520
521         if (is_single_threaded(wq)) {
522                 /* Always use first cpu's area. */
523                 wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
524         } else {
525                 int cpu;
526
527                 for_each_online_cpu(cpu)
528                         wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
529         }
530 out:
531         mutex_unlock(&workqueue_mutex);
532 }
533 EXPORT_SYMBOL_GPL(flush_work);
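/*
 * Usage sketch (illustrative; "my_dev" is hypothetical): flush_work() is
 * aimed at tearing down a single object whose work item may be queued or
 * already running, after the caller has made sure it cannot be requeued.
 *
 *	struct my_dev {
 *		struct workqueue_struct *wq;
 *		struct work_struct reset_work;
 *	};
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		(callers have been stopped from queueing reset_work again)
 *		flush_work(dev->wq, &dev->reset_work);
 *		(reset_work is now neither queued nor running)
 *	}
 */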
534
535 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
536                                                    int cpu, int freezeable)
537 {
538         struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
539         struct task_struct *p;
540
541         spin_lock_init(&cwq->lock);
542         cwq->wq = wq;
543         cwq->thread = NULL;
544         cwq->freezeable = freezeable;
545         INIT_LIST_HEAD(&cwq->worklist);
546         init_waitqueue_head(&cwq->more_work);
547
548         if (is_single_threaded(wq))
549                 p = kthread_create(worker_thread, cwq, "%s", wq->name);
550         else
551                 p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
552         if (IS_ERR(p))
553                 return NULL;
554         cwq->thread = p;
555         return p;
556 }
557
558 struct workqueue_struct *__create_workqueue(const char *name,
559                                             int singlethread, int freezeable)
560 {
561         int cpu, destroy = 0;
562         struct workqueue_struct *wq;
563         struct task_struct *p;
564
565         wq = kzalloc(sizeof(*wq), GFP_KERNEL);
566         if (!wq)
567                 return NULL;
568
569         wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
570         if (!wq->cpu_wq) {
571                 kfree(wq);
572                 return NULL;
573         }
574
575         wq->name = name;
576         mutex_lock(&workqueue_mutex);
577         if (singlethread) {
578                 INIT_LIST_HEAD(&wq->list);
579                 p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
580                 if (!p)
581                         destroy = 1;
582                 else
583                         wake_up_process(p);
584         } else {
585                 list_add(&wq->list, &workqueues);
586                 for_each_online_cpu(cpu) {
587                         p = create_workqueue_thread(wq, cpu, freezeable);
588                         if (p) {
589                                 kthread_bind(p, cpu);
590                                 wake_up_process(p);
591                         } else
592                                 destroy = 1;
593                 }
594         }
595         mutex_unlock(&workqueue_mutex);
596
597         /*
598          * Was there any error during startup? If yes then clean up:
599          */
600         if (destroy) {
601                 destroy_workqueue(wq);
602                 wq = NULL;
603         }
604         return wq;
605 }
606 EXPORT_SYMBOL_GPL(__create_workqueue);
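/*
 * Usage sketch (illustrative): callers normally use the wrappers from
 * <linux/workqueue.h> rather than __create_workqueue() directly.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_workqueue("my_events");		(one thread per CPU)
 *	wq = create_singlethread_workqueue("my_st");	(one thread total)
 *	if (!wq)
 *		return -ENOMEM;
 */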
607
608 static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
609 {
610         struct cpu_workqueue_struct *cwq;
611         unsigned long flags;
612         struct task_struct *p;
613
614         cwq = per_cpu_ptr(wq->cpu_wq, cpu);
615         spin_lock_irqsave(&cwq->lock, flags);
616         p = cwq->thread;
617         cwq->thread = NULL;
618         spin_unlock_irqrestore(&cwq->lock, flags);
619         if (p)
620                 kthread_stop(p);
621 }
622
623 /**
624  * destroy_workqueue - safely terminate a workqueue
625  * @wq: target workqueue
626  *
627  * Safely destroy a workqueue. All work currently pending will be done first.
628  */
629 void destroy_workqueue(struct workqueue_struct *wq)
630 {
631         int cpu;
632
633         flush_workqueue(wq);
634
635         /* We don't need the distraction of CPUs appearing and vanishing. */
636         mutex_lock(&workqueue_mutex);
637         if (is_single_threaded(wq))
638                 cleanup_workqueue_thread(wq, singlethread_cpu);
639         else {
640                 for_each_online_cpu(cpu)
641                         cleanup_workqueue_thread(wq, cpu);
642                 list_del(&wq->list);
643         }
644         mutex_unlock(&workqueue_mutex);
645         free_percpu(wq->cpu_wq);
646         kfree(wq);
647 }
648 EXPORT_SYMBOL_GPL(destroy_workqueue);
649
650 static struct workqueue_struct *keventd_wq;
651
652 /**
653  * schedule_work - put work task in global workqueue
654  * @work: job to be done
655  *
656  * This puts a job in the kernel-global workqueue.
657  */
658 int fastcall schedule_work(struct work_struct *work)
659 {
660         return queue_work(keventd_wq, work);
661 }
662 EXPORT_SYMBOL(schedule_work);
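/*
 * Usage sketch (illustrative): for occasional work with no special latency
 * or ordering requirements, the shared keventd queue avoids the cost of a
 * dedicated per-driver thread.
 *
 *	static DECLARE_WORK(my_cleanup_work, my_cleanup_fn);
 *
 *	(e.g. from an interrupt handler:)
 *	schedule_work(&my_cleanup_work);
 */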
663
664 /**
665  * schedule_delayed_work - put work task in global workqueue after delay
666  * @dwork: job to be done
667  * @delay: number of jiffies to wait or 0 for immediate execution
668  *
669  * After waiting for a given time this puts a job in the kernel-global
670  * workqueue.
671  */
672 int fastcall schedule_delayed_work(struct delayed_work *dwork,
673                                         unsigned long delay)
674 {
675         timer_stats_timer_set_start_info(&dwork->timer);
676         return queue_delayed_work(keventd_wq, dwork, delay);
677 }
678 EXPORT_SYMBOL(schedule_delayed_work);
679
680 /**
681  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
682  * @cpu: cpu to use
683  * @dwork: job to be done
684  * @delay: number of jiffies to wait
685  *
686  * After waiting for a given time this puts a job in the kernel-global
687  * workqueue on the specified CPU.
688  */
689 int schedule_delayed_work_on(int cpu,
690                         struct delayed_work *dwork, unsigned long delay)
691 {
692         return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
693 }
694 EXPORT_SYMBOL(schedule_delayed_work_on);
695
696 /**
697  * schedule_on_each_cpu - call a function on each online CPU from keventd
698  * @func: the function to call
699  *
700  * Returns zero on success.
701  * Returns -ve errno on failure.
702  *
703  * Appears to be racy against CPU hotplug.
704  *
705  * schedule_on_each_cpu() is very slow.
706  */
707 int schedule_on_each_cpu(work_func_t func)
708 {
709         int cpu;
710         struct work_struct *works;
711
712         works = alloc_percpu(struct work_struct);
713         if (!works)
714                 return -ENOMEM;
715
716         preempt_disable();              /* CPU hotplug */
717         for_each_online_cpu(cpu) {
718                 struct work_struct *work = per_cpu_ptr(works, cpu);
719
720                 INIT_WORK(work, func);
721                 set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
722                 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
723         }
724         preempt_enable();
725         flush_workqueue(keventd_wq);
726         free_percpu(works);
727         return 0;
728 }
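/*
 * Usage sketch (illustrative): run a function once on every online CPU and
 * wait for all of them via the flush_workqueue() call above.
 *
 *	static void drain_local_caches(struct work_struct *unused)
 *	{
 *		(runs in keventd context on each online CPU)
 *	}
 *
 *	schedule_on_each_cpu(drain_local_caches);
 */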
729
730 void flush_scheduled_work(void)
731 {
732         flush_workqueue(keventd_wq);
733 }
734 EXPORT_SYMBOL(flush_scheduled_work);
735
736 void flush_work_keventd(struct work_struct *work)
737 {
738         flush_work(keventd_wq, work);
739 }
740 EXPORT_SYMBOL(flush_work_keventd);
741
742 /**
743  * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
744  * @wq:   the controlling workqueue structure
745  * @dwork: the delayed work struct
746  */
747 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
748                                        struct delayed_work *dwork)
749 {
750         while (!cancel_delayed_work(dwork))
751                 flush_workqueue(wq);
752 }
753 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
754
755 /**
756  * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
757  * @dwork: the delayed work struct
758  */
759 void cancel_rearming_delayed_work(struct delayed_work *dwork)
760 {
761         cancel_rearming_delayed_workqueue(keventd_wq, dwork);
762 }
763 EXPORT_SYMBOL(cancel_rearming_delayed_work);
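/*
 * Usage sketch (illustrative): a handler that rearms itself cannot be
 * stopped reliably by a single cancel_delayed_work(), hence the
 * cancel/flush loop above.
 *
 *	static struct delayed_work my_poll_work;
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		poll_the_hardware();
 *		schedule_delayed_work(&my_poll_work, HZ);	(rearm)
 *	}
 *
 *	(on shutdown:)
 *	cancel_rearming_delayed_work(&my_poll_work);
 */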
764
765 /**
766  * execute_in_process_context - reliably execute the routine with user context
767  * @fn:         the function to execute
768  * @ew:         guaranteed storage for the execute work structure (must
769  *              be available when the work executes)
770  *
771  * Executes the function immediately if process context is available,
772  * otherwise schedules the function for delayed execution.
773  *
774  * Returns:     0 - function was executed
775  *              1 - function was scheduled for execution
776  */
777 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
778 {
779         if (!in_interrupt()) {
780                 fn(&ew->work);
781                 return 0;
782         }
783
784         INIT_WORK(&ew->work, fn);
785         schedule_work(&ew->work);
786
787         return 1;
788 }
789 EXPORT_SYMBOL_GPL(execute_in_process_context);
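/*
 * Usage sketch (illustrative; "my_obj" is hypothetical): useful when a
 * release path may be entered from either process or interrupt context and
 * the caller can embed the execute_work storage in its object.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *	};
 *
 *	static void my_obj_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	(from a possibly-atomic release path:)
 *	execute_in_process_context(my_obj_release, &obj->ew);
 */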
790
791 int keventd_up(void)
792 {
793         return keventd_wq != NULL;
794 }
795
796 int current_is_keventd(void)
797 {
798         struct cpu_workqueue_struct *cwq;
799         int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
800         int ret = 0;
801
802         BUG_ON(!keventd_wq);
803
804         cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
805         if (current == cwq->thread)
806                 ret = 1;
807
808         return ret;
809
810 }
811
812 /* Take the work from this (downed) CPU. */
813 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
814 {
815         struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
816         struct list_head list;
817         struct work_struct *work;
818
819         spin_lock_irq(&cwq->lock);
820         list_replace_init(&cwq->worklist, &list);
821
822         while (!list_empty(&list)) {
823                 printk("Taking work for %s\n", wq->name);
824                 work = list_entry(list.next, struct work_struct, entry);
825                 list_del(&work->entry);
826                 __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
827         }
828         spin_unlock_irq(&cwq->lock);
829 }
830
831 /* We're holding the cpucontrol mutex here */
832 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
833                                   unsigned long action,
834                                   void *hcpu)
835 {
836         unsigned int hotcpu = (unsigned long)hcpu;
837         struct workqueue_struct *wq;
838
839         switch (action) {
840         case CPU_UP_PREPARE:
841                 mutex_lock(&workqueue_mutex);
842                 /* Create a new workqueue thread for it. */
843                 list_for_each_entry(wq, &workqueues, list) {
844                         if (!create_workqueue_thread(wq, hotcpu, 0)) {
845                                 printk("workqueue for %i failed\n", hotcpu);
846                                 return NOTIFY_BAD;
847                         }
848                 }
849                 break;
850
851         case CPU_ONLINE:
852                 /* Kick off worker threads. */
853                 list_for_each_entry(wq, &workqueues, list) {
854                         struct cpu_workqueue_struct *cwq;
855
856                         cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
857                         kthread_bind(cwq->thread, hotcpu);
858                         wake_up_process(cwq->thread);
859                 }
860                 mutex_unlock(&workqueue_mutex);
861                 break;
862
863         case CPU_UP_CANCELED:
864                 list_for_each_entry(wq, &workqueues, list) {
865                         if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
866                                 continue;
867                         /* Unbind so it can run. */
868                         kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
869                                      any_online_cpu(cpu_online_map));
870                         cleanup_workqueue_thread(wq, hotcpu);
871                 }
872                 mutex_unlock(&workqueue_mutex);
873                 break;
874
875         case CPU_DOWN_PREPARE:
876                 mutex_lock(&workqueue_mutex);
877                 break;
878
879         case CPU_DOWN_FAILED:
880                 mutex_unlock(&workqueue_mutex);
881                 break;
882
883         case CPU_DEAD:
884                 list_for_each_entry(wq, &workqueues, list)
885                         cleanup_workqueue_thread(wq, hotcpu);
886                 list_for_each_entry(wq, &workqueues, list)
887                         take_over_work(wq, hotcpu);
888                 mutex_unlock(&workqueue_mutex);
889                 break;
890         }
891
892         return NOTIFY_OK;
893 }
894
895 void init_workqueues(void)
896 {
897         singlethread_cpu = first_cpu(cpu_possible_map);
898         hotcpu_notifier(workqueue_cpu_callback, 0);
899         keventd_wq = create_workqueue("events");
900         BUG_ON(!keventd_wq);
901 }
902