unify flush_work/flush_work_keventd and rename it to cancel_work_sync
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;
        int should_stop;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        struct list_head list;
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_single_threaded(wq)
                ? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                                struct cpu_workqueue_struct *cwq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
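
/*
 * Note on the encoding used by set_wq_data()/get_wq_data() above: a work
 * item's atomic_long_t data field holds both the owning cpu_workqueue_struct
 * pointer and the WORK_STRUCT_* flag bits.  The per-cpu cpu_workqueue_struct
 * is cacheline aligned, so the low bits of its address are zero and can
 * carry the flags (WORK_STRUCT_FLAG_MASK); masking with
 * WORK_STRUCT_WQ_DATA_MASK recovers the pointer.
 */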

static void insert_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work, int tail)
{
        set_wq_data(work, cwq);
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
                list_add(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, 1);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, get_cpu()), work);
                put_cpu();
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
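
/*
 * Illustrative sketch (not part of this file; my_wq, my_work and my_work_fn
 * are invented names).  A caller declares a handler and a work item, then
 * queues it on its workqueue:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		... runs in process context on a workqueue thread ...
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);
 *
 * The return value is 1 when the PENDING bit was newly set and 0 when the
 * work was already queued.
 */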

void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
        struct workqueue_struct *wq = cwq->wq;

        __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
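
/*
 * Illustrative sketch (not part of this file; my_wq, my_dwork and my_dwork_fn
 * are invented names).  Delayed work is declared with DECLARE_DELAYED_WORK()
 * and queued with a delay in jiffies:
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 *
 * The timer fires delayed_work_timer_fn(), which requeues the embedded
 * work_struct on the workqueue that set_wq_data() recorded above.
 */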

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
        int should_stop = cwq->should_stop;

        if (unlikely(should_stop)) {
                spin_lock_irq(&cwq->lock);
                should_stop = cwq->should_stop && list_empty(&cwq->worklist);
                if (should_stop)
                        cwq->thread = NULL;
                spin_unlock_irq(&cwq->lock);
        }

        return should_stop;
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);
        struct k_sigaction sa;

        if (!cwq->wq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);
        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to ensure node local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) && !cwq->should_stop
                    && list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                try_to_freeze();

                if (cwq_should_stop(cwq))
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                                        struct wq_barrier *barr, int tail)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, tail);
}
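
/*
 * How the barrier is used: flush_cpu_workqueue() queues a wq_barrier at the
 * tail of the worklist and sleeps on barr->done; once the worker thread has
 * run everything ahead of it, wq_barrier_func() wakes the waiter.
 * wait_on_work() inserts the barrier at the head instead (tail == 0) so it
 * only waits for the currently running work, and cleanup_workqueue_thread()
 * uses a tail barrier to drain a queue before stopping its thread.
 */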

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                struct wq_barrier barr;
                int active = 0;

                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active)
                        wait_for_completion(&barr.done);
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        for_each_cpu_mask(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
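
/*
 * Illustrative sketch (not part of this file; my_wq and my_driver_remove are
 * invented names).  Drivers typically flush their queue during teardown so
 * that no handler is still running when the driver's data goes away:
 *
 *	static void my_driver_remove(void)
 *	{
 *		... stop submitting new work ...
 *		flush_workqueue(my_wq);
 *		destroy_workqueue(my_wq);
 *	}
 *
 * Work queued on the shared keventd queue is flushed with
 * flush_scheduled_work() below instead.
 */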

static void wait_on_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, 0);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * cancel_work_sync() will attempt to cancel the work if it is queued. If the
 * work's callback appears to be running, cancel_work_sync() will block until
 * it has completed.
 *
 * cancel_work_sync() is designed to be used when the caller is tearing down
 * data structures which the callback function operates upon. It is expected
 * that, prior to calling cancel_work_sync(), the caller has arranged for the
 * work to not be requeued.
 */
void cancel_work_sync(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        const cpumask_t *cpu_map;
        int cpu;

        might_sleep();

        cwq = get_wq_data(work);
        /* Was it ever queued ? */
        if (!cwq)
                return;

        /*
         * This work can't be re-queued, no need to re-check that
         * get_wq_data() is still the same when we take cwq->lock.
         */
        spin_lock_irq(&cwq->lock);
        list_del_init(&work->entry);
        work_clear_pending(work);
        spin_unlock_irq(&cwq->lock);

        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);

        for_each_cpu_mask(cpu, *cpu_map)
                wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
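
/*
 * Illustrative teardown sketch (not part of this file; my_dev, my_work and
 * my_free are invented names).  The caller first prevents requeueing, then
 * lets cancel_work_sync() remove a pending instance and wait out a running
 * one before the underlying data goes away:
 *
 *	my_dev->stopping = 1;		... handler checks this before requeueing
 *	cancel_work_sync(&my_dev->my_work);
 *	my_free(my_dev);		... safe: the callback can no longer run
 */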


static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
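
/*
 * Illustrative sketch (not part of this file; my_work and my_dwork are
 * invented names).  Callers that do not need a private workqueue use the
 * shared keventd queue through these wrappers:
 *
 *	schedule_work(&my_work);
 *	schedule_delayed_work(&my_dwork, HZ);	... roughly one second later
 *
 * On teardown such callers typically use cancel_work_sync(),
 * cancel_rearming_delayed_work() or flush_scheduled_work() below.
 */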

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns a negative errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        preempt_disable();              /* CPU hotplug */
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        preempt_enable();
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or cancel_work_sync() to wait
 * on it.
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

        /* Was it ever queued ? */
        if (cwq != NULL) {
                struct workqueue_struct *wq = cwq->wq;

                while (!cancel_delayed_work(dwork))
                        flush_workqueue(wq);
        }
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
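
/*
 * Illustrative sketch of the "rearming" pattern this helper targets (not
 * part of this file; my_poll and my_poll_fn are invented names).  The
 * handler requeues itself, so a single cancel_delayed_work() can lose the
 * race with the rearm, while the cancel/flush loop above cannot:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		... do the periodic work ...
 *		schedule_delayed_work(&my_poll, HZ);
 *	}
 *	static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *	... on teardown:
 *	cancel_rearming_delayed_work(&my_poll);
 */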

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
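
/*
 * Illustrative note (not part of this file; my_obj and my_release are
 * invented names).  Because the callback may run either right away or later
 * from keventd, the execute_work storage must outlive this call, so it is
 * usually embedded in the object that the callback releases:
 *
 *	execute_in_process_context(my_release, &my_obj->ew);
 */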

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);

        cwq->thread = p;
        cwq->should_stop = 0;

        return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        INIT_LIST_HEAD(&wq->list);

        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                mutex_lock(&workqueue_mutex);
                list_add(&wq->list, &workqueues);

                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                mutex_unlock(&workqueue_mutex);
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
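
/*
 * Illustrative sketch (not part of this file; my_wq is an invented name).
 * Callers normally reach __create_workqueue() through the wrappers in
 * <linux/workqueue.h>, such as create_workqueue() and
 * create_singlethread_workqueue(), which fill in the singlethread/freezeable
 * arguments:
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(my_wq);	... runs pending work, then frees
 */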

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct wq_barrier barr;
        int alive = 0;

        spin_lock_irq(&cwq->lock);
        if (cwq->thread != NULL) {
                insert_wq_barrier(cwq, &barr, 1);
                cwq->should_stop = 1;
                alive = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (alive) {
                wait_for_completion(&barr.done);

                while (unlikely(cwq->thread != NULL))
                        cpu_relax();
                /*
                 * Wait until cwq->thread unlocks cwq->lock,
                 * it won't touch *cwq after that.
                 */
                smp_rmb();
                spin_unlock_wait(&cwq->lock);
        }
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
        int cpu;

        mutex_lock(&workqueue_mutex);
        list_del(&wq->list);
        mutex_unlock(&workqueue_mutex);

        for_each_cpu_mask(cpu, *cpu_map) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);
                cleanup_workqueue_thread(cwq, cpu);
        }

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_LOCK_ACQUIRE:
                mutex_lock(&workqueue_mutex);
                return NOTIFY_OK;

        case CPU_LOCK_RELEASE:
                mutex_unlock(&workqueue_mutex);
                return NOTIFY_OK;

        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
        }

        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue for %i failed\n", cpu);
                        return NOTIFY_BAD;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_DEAD:
                        cleanup_workqueue_thread(cwq, cpu);
                        break;
                }
        }

        return NOTIFY_OK;
}

void __init init_workqueues(void)
{
        cpu_populated_map = cpu_online_map;
        singlethread_cpu = first_cpu(cpu_possible_map);
        cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}