cgroup: annotate cgroup_init_subsys with __init
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 87693b3..00ff4d0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -32,6 +32,7 @@
 #include <linux/freezer.h>
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
+#include <linux/lockdep.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -47,7 +48,6 @@ struct cpu_workqueue_struct {
 
        struct workqueue_struct *wq;
        struct task_struct *thread;
-       int should_stop;
 
        int run_depth;          /* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
@@ -62,16 +62,24 @@ struct workqueue_struct {
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
+#ifdef CONFIG_LOCKDEP
+       struct lockdep_map lockdep_map;
+#endif
 };
 
-/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
-   threads to each one as cpus come/go. */
-static DEFINE_MUTEX(workqueue_mutex);
+/* Serializes the accesses to the list of workqueues. */
+static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
 static cpumask_t cpu_singlethread_map __read_mostly;
-/* optimization, we could use cpu_possible_map */
+/*
+ * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
+ * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
+ * which comes in between can't use for_each_online_cpu(). We could
+ * use cpu_possible_map; the cpumask below is more documentation
+ * than optimization.
+ */
 static cpumask_t cpu_populated_map __read_mostly;
 
 /* If it's single threaded, it isn't in the list of workqueues. */
@@ -120,6 +128,11 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work, int tail)
 {
        set_wq_data(work, cwq);
+       /*
+        * Ensure that we get the right work->data if we see the
+        * result of list_add() below, see try_to_grab_pending().
+        */
+       smp_wmb();
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
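
The smp_wmb() added here pairs with the smp_rmb() in try_to_grab_pending() introduced later in this patch: the store of work->data must become visible before the list_add() does, so whoever finds the work on a list also sees which cwq it belongs to. Condensed from the two hunks, with comments added, the pairing is:

	/* Producer side, insert_work(): publish work->data before the list node. */
	set_wq_data(work, cwq);                        /* store A: work->data = cwq */
	smp_wmb();                                     /* order store A before store B */
	list_add_tail(&work->entry, &cwq->worklist);   /* store B: node goes on a list */

	/* Consumer side, try_to_grab_pending(): observe in the opposite order. */
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {               /* load B: node is on some list */
		smp_rmb();                             /* order load B before load A */
		if (cwq == get_wq_data(work))          /* load A: must see the new cwq */
			list_del_init(&work->entry);
	}
	spin_unlock_irq(&cwq->lock);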
@@ -148,7 +161,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  * We queue the work to the CPU it was submitted, but there is no
  * guarantee that it will be processed by that CPU.
  */
-int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
+int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
        int ret = 0;
 
@@ -162,7 +175,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(queue_work);
 
-void delayed_work_timer_fn(unsigned long __data)
+static void delayed_work_timer_fn(unsigned long __data)
 {
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
@@ -179,7 +192,7 @@ void delayed_work_timer_fn(unsigned long __data)
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
-int fastcall queue_delayed_work(struct workqueue_struct *wq,
+int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
 {
        timer_stats_timer_set_start_info(&dwork->timer);
@@ -206,6 +219,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
 
+       timer_stats_timer_set_start_info(&dwork->timer);
        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
@@ -240,6 +254,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;
+#ifdef CONFIG_LOCKDEP
+               /*
+                * It is permissible to free the struct work_struct
+                * from inside the function that is called from it;
+                * lockdep needs to take this into account as well.
+                * To avoid bogus "held lock freed" warnings as well
+                * as problems when looking into work->lockdep_map,
+                * make a copy and use that here.
+                */
+               struct lockdep_map lockdep_map = work->lockdep_map;
+#endif
 
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
@@ -247,13 +272,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
+               lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+               lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
                f(work);
+               lock_release(&lockdep_map, 1, _THIS_IP_);
+               lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
 
                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
-                                       current->pid);
+                                       task_pid_nr(current));
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
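
The lock_acquire()/lock_release() pairs around f(work) expose the workqueue and the individual work item to lockdep as pseudo-locks. Combined with the matching annotation added to flush_workqueue() below, lockdep can report the classic flush deadlock the first time the dependency is created, instead of waiting for it to actually hang. A hypothetical driver pattern that would now draw a warning (all names here are illustrative, not from this patch):

	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	static DEFINE_MUTEX(dev_mutex);
	static struct workqueue_struct *dev_wq;

	static void dev_work_fn(struct work_struct *work)
	{
		mutex_lock(&dev_mutex);        /* dev_mutex nests inside wq->lockdep_map */
		/* ... touch device state ... */
		mutex_unlock(&dev_mutex);
	}
	static DECLARE_WORK(dev_work, dev_work_fn);

	static void dev_start(void)
	{
		queue_work(dev_wq, &dev_work); /* dev_work_fn() runs on dev_wq */
	}

	static void dev_teardown(void)
	{
		mutex_lock(&dev_mutex);
		/*
		 * flush_workqueue() "acquires" dev_wq->lockdep_map while dev_mutex
		 * is held; together with the dependency recorded in dev_work_fn()
		 * this closes a cycle, and lockdep complains even if dev_work is
		 * not queued at this moment.
		 */
		flush_workqueue(dev_wq);
		mutex_unlock(&dev_mutex);
	}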
@@ -267,56 +296,27 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
        spin_unlock_irq(&cwq->lock);
 }
 
-/*
- * NOTE: the caller must not touch *cwq if this func returns true
- */
-static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
-{
-       int should_stop = cwq->should_stop;
-
-       if (unlikely(should_stop)) {
-               spin_lock_irq(&cwq->lock);
-               should_stop = cwq->should_stop && list_empty(&cwq->worklist);
-               if (should_stop)
-                       cwq->thread = NULL;
-               spin_unlock_irq(&cwq->lock);
-       }
-
-       return should_stop;
-}
-
 static int worker_thread(void *__cwq)
 {
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);
-       struct k_sigaction sa;
 
-       if (!cwq->wq->freezeable)
-               current->flags |= PF_NOFREEZE;
+       if (cwq->wq->freezeable)
+               set_freezable();
 
        set_user_nice(current, -5);
-       /*
-        * We inherited MPOL_INTERLEAVE from the booting kernel.
-        * Set MPOL_DEFAULT to insure node local allocations.
-        */
-       numa_default_policy();
-
-       /* SIG_IGN makes children autoreap: see do_notify_parent(). */
-       sa.sa.sa_handler = SIG_IGN;
-       sa.sa.sa_flags = 0;
-       siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
-       do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
 
        for (;;) {
-               if (cwq->wq->freezeable)
-                       try_to_freeze();
-
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
-               if (!cwq->should_stop && list_empty(&cwq->worklist))
+               if (!freezing(current) &&
+                   !kthread_should_stop() &&
+                   list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);
 
-               if (cwq_should_stop(cwq))
+               try_to_freeze();
+
+               if (kthread_should_stop())
                        break;
 
                run_workqueue(cwq);
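
The rewritten wait loop is the standard prepare_to_wait()/kthread_should_stop() idiom: every exit condition is re-checked after prepare_to_wait(), so a wakeup sent by kthread_stop(), the freezer, or queue_work() between the check and schedule() cannot be lost. Annotated, the resulting loop reads:

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&          /* freezer wants this thread?  */
		    !kthread_should_stop() &&      /* kthread_stop() was called?  */
		    list_empty(&cwq->worklist))    /* nothing queued? then sleep  */
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();                   /* enter the refrigerator if asked */

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}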
@@ -347,18 +347,21 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
        insert_work(cwq, &barr->work, tail);
 }
 
-static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
+static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
+       int active;
+
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
+               active = 1;
        } else {
                struct wq_barrier barr;
-               int active = 0;
 
+               active = 0;
                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
@@ -369,6 +372,8 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
                if (active)
                        wait_for_completion(&barr.done);
        }
+
+       return active;
 }
 
 /**
@@ -384,18 +389,59 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  * This function used to run the workqueues itself.  Now we just wait for the
  * helper threads to do it.
  */
-void fastcall flush_workqueue(struct workqueue_struct *wq)
+void flush_workqueue(struct workqueue_struct *wq)
 {
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        int cpu;
 
        might_sleep();
+       lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+       lock_release(&wq->lockdep_map, 1, _THIS_IP_);
        for_each_cpu_mask(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-static void wait_on_work(struct cpu_workqueue_struct *cwq,
+/*
+ * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING bit,
+ * so this work can't be re-armed in any way.
+ */
+static int try_to_grab_pending(struct work_struct *work)
+{
+       struct cpu_workqueue_struct *cwq;
+       int ret = -1;
+
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
+               return 0;
+
+       /*
+        * The queueing is in progress, or it is already queued. Try to
+        * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+        */
+
+       cwq = get_wq_data(work);
+       if (!cwq)
+               return ret;
+
+       spin_lock_irq(&cwq->lock);
+       if (!list_empty(&work->entry)) {
+               /*
+                * This work is queued, but perhaps we locked the wrong cwq.
+                * In that case we must see the new value after rmb(), see
+                * insert_work()->wmb().
+                */
+               smp_rmb();
+               if (cwq == get_wq_data(work)) {
+                       list_del_init(&work->entry);
+                       ret = 1;
+               }
+       }
+       spin_unlock_irq(&cwq->lock);
+
+       return ret;
+}
+
+static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
 {
        struct wq_barrier barr;
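
For reference, the return values of try_to_grab_pending() above and how __cancel_work_timer() below reacts to them:

	/*
	 *   0  - the work was idle; test_and_set_bit() has just set
	 *        WORK_STRUCT_PENDING, so the caller owns the bit and the work
	 *        cannot be (re-)queued behind its back
	 *   1  - the work was found on ->worklist and has been unlinked;
	 *        WORK_STRUCT_PENDING stays set and is owned by the caller
	 *  -1  - queueing is in flight elsewhere (no cwq recorded yet, or the
	 *        work moved to a different cwq); the caller must retry
	 */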
@@ -412,49 +458,88 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
                wait_for_completion(&barr.done);
 }
 
-/**
- * flush_work - block until a work_struct's callback has terminated
- * @wq: the workqueue on which the work is queued
- * @work: the work which is to be flushed
- *
- * flush_work() will attempt to cancel the work if it is queued.  If the work's
- * callback appears to be running, flush_work() will block until it has
- * completed.
- *
- * flush_work() is designed to be used when the caller is tearing down data
- * structures which the callback function operates upon.  It is expected that,
- * prior to calling flush_work(), the caller has arranged for the work to not
- * be requeued.
- */
-void flush_work(struct workqueue_struct *wq, struct work_struct *work)
+static void wait_on_work(struct work_struct *work)
 {
-       const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
+       struct workqueue_struct *wq;
+       const cpumask_t *cpu_map;
        int cpu;
 
        might_sleep();
 
+       lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+       lock_release(&work->lockdep_map, 1, _THIS_IP_);
+
        cwq = get_wq_data(work);
-       /* Was it ever queued ? */
        if (!cwq)
                return;
 
-       /*
-        * This work can't be re-queued, no need to re-check that
-        * get_wq_data() is still the same when we take cwq->lock.
-        */
-       spin_lock_irq(&cwq->lock);
-       list_del_init(&work->entry);
-       work_clear_pending(work);
-       spin_unlock_irq(&cwq->lock);
+       wq = cwq->wq;
+       cpu_map = wq_cpu_map(wq);
 
        for_each_cpu_mask(cpu, *cpu_map)
-               wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+               wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+}
+
+static int __cancel_work_timer(struct work_struct *work,
+                               struct timer_list* timer)
+{
+       int ret;
+
+       do {
+               ret = (timer && likely(del_timer(timer)));
+               if (!ret)
+                       ret = try_to_grab_pending(work);
+               wait_on_work(work);
+       } while (unlikely(ret < 0));
+
+       work_clear_pending(work);
+       return ret;
+}
+
+/**
+ * cancel_work_sync - block until a work_struct's callback has terminated
+ * @work: the work which is to be flushed
+ *
+ * Returns true if @work was pending.
+ *
+ * cancel_work_sync() will cancel the work if it is queued. If the work's
+ * callback appears to be running, cancel_work_sync() will block until it
+ * has completed.
+ *
+ * It is possible to use this function if the work re-queues itself. It can
+ * cancel the work even if it migrates to another workqueue; however, in that
+ * case it only guarantees that work->func() has completed on the last queued
+ * workqueue.
+ *
+ * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
+ * pending, otherwise it goes into a busy-wait loop until the timer expires.
+ *
+ * The caller must ensure that workqueue_struct on which this work was last
+ * queued can't be destroyed before this function returns.
+ */
+int cancel_work_sync(struct work_struct *work)
+{
+       return __cancel_work_timer(work, NULL);
 }
-EXPORT_SYMBOL_GPL(flush_work);
+EXPORT_SYMBOL_GPL(cancel_work_sync);
 
+/**
+ * cancel_delayed_work_sync - reliably kill off a delayed work.
+ * @dwork: the delayed work struct
+ *
+ * Returns true if @dwork was pending.
+ *
+ * It is possible to use this function if @dwork rearms itself via queue_work()
+ * or queue_delayed_work(). See also the comment for cancel_work_sync().
+ */
+int cancel_delayed_work_sync(struct delayed_work *dwork)
+{
+       return __cancel_work_timer(&dwork->work, &dwork->timer);
+}
+EXPORT_SYMBOL(cancel_delayed_work_sync);
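
cancel_delayed_work_sync() plus try_to_grab_pending() is what finally makes it safe to tear down a self-rearming delayed work without the old cancel/flush loop. A hedged sketch of the intended usage (the names and the HZ interval are illustrative only):

	#include <linux/workqueue.h>

	static void poll_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(poll_work, poll_fn);

	static void poll_fn(struct work_struct *work)
	{
		/* ... poll the hardware ... */
		schedule_delayed_work(&poll_work, HZ);   /* re-arm ourselves */
	}

	static void driver_remove(void)
	{
		/*
		 * Deletes a pending timer, steals the work off the queue if it
		 * is already queued, and waits for a running poll_fn() to
		 * finish.  Once the cancel owns WORK_STRUCT_PENDING, a re-arm
		 * attempt from inside poll_fn() fails, so nothing is left behind.
		 */
		cancel_delayed_work_sync(&poll_work);
	}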
 
-static struct workqueue_struct *keventd_wq;
+static struct workqueue_struct *keventd_wq __read_mostly;
 
 /**
  * schedule_work - put work task in global workqueue
@@ -462,7 +547,7 @@ static struct workqueue_struct *keventd_wq;
  *
  * This puts a job in the kernel-global workqueue.
  */
-int fastcall schedule_work(struct work_struct *work)
+int schedule_work(struct work_struct *work)
 {
        return queue_work(keventd_wq, work);
 }
@@ -476,7 +561,7 @@ EXPORT_SYMBOL(schedule_work);
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct delayed_work *dwork,
+int schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
 {
        timer_stats_timer_set_start_info(&dwork->timer);
@@ -496,6 +581,7 @@ EXPORT_SYMBOL(schedule_delayed_work);
 int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
 {
+       timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
@@ -507,8 +593,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  * Returns zero on success.
  * Returns -ve errno on failure.
  *
- * Appears to be racy against CPU hotplug.
- *
  * schedule_on_each_cpu() is very slow.
  */
 int schedule_on_each_cpu(work_func_t func)
@@ -520,7 +604,7 @@ int schedule_on_each_cpu(work_func_t func)
        if (!works)
                return -ENOMEM;
 
-       preempt_disable();              /* CPU hotplug */
+       get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
@@ -528,8 +612,8 @@ int schedule_on_each_cpu(work_func_t func)
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
-       preempt_enable();
        flush_workqueue(keventd_wq);
+       put_online_cpus();
        free_percpu(works);
        return 0;
 }
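
get_online_cpus() replaces preempt_disable() here because the protected section now also covers flush_workqueue(), which sleeps; the new pair keeps the set of online CPUs stable for the whole queue-and-flush sequence while staying preemptible. The same idiom in a standalone, hedged sketch (the per-CPU counter is purely illustrative):

	#include <linux/cpu.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(long, my_counter);

	/* Sum a per-CPU counter over CPUs that cannot be unplugged underneath us. */
	static long my_counter_sum(void)
	{
		long sum = 0;
		int cpu;

		get_online_cpus();              /* hold off CPU hotplug; may sleep */
		for_each_online_cpu(cpu)
			sum += per_cpu(my_counter, cpu);
		put_online_cpus();

		return sum;
	}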
@@ -540,33 +624,6 @@ void flush_scheduled_work(void)
 }
 EXPORT_SYMBOL(flush_scheduled_work);
 
-void flush_work_keventd(struct work_struct *work)
-{
-       flush_work(keventd_wq, work);
-}
-EXPORT_SYMBOL(flush_work_keventd);
-
-/**
- * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
- * @dwork: the delayed work struct
- *
- * Note that the work callback function may still be running on return from
- * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
- */
-void cancel_rearming_delayed_work(struct delayed_work *dwork)
-{
-       struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
-
-       /* Was it ever queued ? */
-       if (cwq != NULL) {
-               struct workqueue_struct *wq = cwq->wq;
-
-               while (!cancel_delayed_work(dwork))
-                       flush_workqueue(wq);
-       }
-}
-EXPORT_SYMBOL(cancel_rearming_delayed_work);
-
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:                the function to execute
@@ -601,7 +658,7 @@ int keventd_up(void)
 int current_is_keventd(void)
 {
        struct cpu_workqueue_struct *cwq;
-       int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
+       int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
        int ret = 0;
 
        BUG_ON(!keventd_wq);
@@ -646,7 +703,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
                return PTR_ERR(p);
 
        cwq->thread = p;
-       cwq->should_stop = 0;
 
        return 0;
 }
@@ -662,8 +718,11 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
        }
 }
 
-struct workqueue_struct *__create_workqueue(const char *name,
-                                           int singlethread, int freezeable)
+struct workqueue_struct *__create_workqueue_key(const char *name,
+                                               int singlethread,
+                                               int freezeable,
+                                               struct lock_class_key *key,
+                                               const char *lock_name)
 {
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
@@ -680,6 +739,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
        }
 
        wq->name = name;
+       lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        INIT_LIST_HEAD(&wq->list);
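
__create_workqueue_key() receives the lock class and name from its callers so that every create_workqueue() call site gets its own lockdep class; lockdep_init_map() then ties that class to wq->lockdep_map. Under CONFIG_LOCKDEP the wrapper in <linux/workqueue.h> does roughly the following, where the static __key gives each call site a distinct class (a simplified sketch, not the verbatim header):

	#define create_workqueue(name)					\
	({								\
		static struct lock_class_key __key;			\
		__create_workqueue_key((name), 0, 0, &__key, (name));	\
	})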
@@ -689,8 +749,10 @@ struct workqueue_struct *__create_workqueue(const char *name,
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
-               mutex_lock(&workqueue_mutex);
+               get_online_cpus();
+               spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
+               spin_unlock(&workqueue_lock);
 
                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
@@ -699,7 +761,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
-               mutex_unlock(&workqueue_mutex);
+               put_online_cpus();
        }
 
        if (err) {
@@ -708,33 +770,33 @@ struct workqueue_struct *__create_workqueue(const char *name,
        }
        return wq;
 }
-EXPORT_SYMBOL_GPL(__create_workqueue);
+EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
-       struct wq_barrier barr;
-       int alive = 0;
-
-       spin_lock_irq(&cwq->lock);
-       if (cwq->thread != NULL) {
-               insert_wq_barrier(cwq, &barr, 1);
-               cwq->should_stop = 1;
-               alive = 1;
-       }
-       spin_unlock_irq(&cwq->lock);
+       /*
+        * Our caller is either destroy_workqueue() or CPU_DEAD;
+        * get_online_cpus() protects cwq->thread.
+        */
+       if (cwq->thread == NULL)
+               return;
 
-       if (alive) {
-               wait_for_completion(&barr.done);
+       lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+       lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
 
-               while (unlikely(cwq->thread != NULL))
-                       cpu_relax();
-               /*
-                * Wait until cwq->thread unlocks cwq->lock,
-                * it won't touch *cwq after that.
-                */
-               smp_rmb();
-               spin_unlock_wait(&cwq->lock);
-       }
+       flush_cpu_workqueue(cwq);
+       /*
+        * If the caller is CPU_DEAD and cwq->worklist was not empty,
+        * a concurrent flush_workqueue() can insert a barrier after us.
+        * However, in that case run_workqueue() won't return and check
+        * kthread_should_stop() until it flushes all work_struct's.
+        * When ->worklist becomes empty it is safe to exit because no
+        * more work_structs can be queued on this cwq: flush_workqueue
+        * checks list_empty(), and a "normal" queue_work() can't use
+        * a dead CPU.
+        */
+       kthread_stop(cwq->thread);
+       cwq->thread = NULL;
 }
 
 /**
@@ -749,9 +811,11 @@ void destroy_workqueue(struct workqueue_struct *wq)
        struct cpu_workqueue_struct *cwq;
        int cpu;
 
-       mutex_lock(&workqueue_mutex);
+       get_online_cpus();
+       spin_lock(&workqueue_lock);
        list_del(&wq->list);
-       mutex_unlock(&workqueue_mutex);
+       spin_unlock(&workqueue_lock);
+       put_online_cpus();
 
        for_each_cpu_mask(cpu, *cpu_map) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);
@@ -771,14 +835,9 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
 
-       switch (action) {
-       case CPU_LOCK_ACQUIRE:
-               mutex_lock(&workqueue_mutex);
-               return NOTIFY_OK;
+       action &= ~CPU_TASKS_FROZEN;
 
-       case CPU_LOCK_RELEASE:
-               mutex_unlock(&workqueue_mutex);
-               return NOTIFY_OK;
+       switch (action) {
 
        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
@@ -791,7 +850,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
-                       printk(KERN_ERR "workqueue for %i failed\n", cpu);
+                       printk(KERN_ERR "workqueue [%s] for %i failed\n",
+                               wq->name, cpu);
                        return NOTIFY_BAD;
 
                case CPU_ONLINE: