+struct workqueue_struct *__create_workqueue_key(const char *name,
+						int singlethread,
+						int freezeable,
+						int rt,
+						struct lock_class_key *key,
+						const char *lock_name)
+{
+	struct workqueue_struct *wq;
+	struct cpu_workqueue_struct *cwq;
+	int err = 0, cpu;
+
+	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+	if (!wq)
+		return NULL;
+
+	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
+	if (!wq->cpu_wq) {
+		kfree(wq);
+		return NULL;
+	}
+
+	wq->name = name;
+	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
+	wq->singlethread = singlethread;
+	wq->freezeable = freezeable;
+	wq->rt = rt;
+	INIT_LIST_HEAD(&wq->list);
+
+	if (singlethread) {
+		cwq = init_cpu_workqueue(wq, singlethread_cpu);
+		err = create_workqueue_thread(cwq, singlethread_cpu);
+		start_workqueue_thread(cwq, -1);
+	} else {
+		cpu_maps_update_begin();
+		/*
+		 * We must place this wq on list even if the code below fails.
+		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
+		 * destroy_workqueue() takes the lock, in that case we leak
+		 * cwq[cpu]->thread.
+		 */
+		spin_lock(&workqueue_lock);
+		list_add(&wq->list, &workqueues);
+		spin_unlock(&workqueue_lock);
+		/*
+		 * We must initialize cwqs for each possible cpu even if we
+		 * are going to call destroy_workqueue() finally. Otherwise
+		 * cpu_up() can hit the uninitialized cwq once we drop the
+		 * lock.
+		 */
+		for_each_possible_cpu(cpu) {
+			cwq = init_cpu_workqueue(wq, cpu);
+			if (err || !cpu_online(cpu))
+				continue;
+			err = create_workqueue_thread(cwq, cpu);
+			start_workqueue_thread(cwq, cpu);
+		}
+		cpu_maps_update_done();
+	}
+
+	if (err) {
+		destroy_workqueue(wq);
+		wq = NULL;
+	}
+	return wq;
+}
+EXPORT_SYMBOL_GPL(__create_workqueue_key);
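Callers do not normally invoke __create_workqueue_key() directly; the create_workqueue()/create_singlethread_workqueue() wrapper macros in include/linux/workqueue.h supply the lockdep key and class name. A minimal, hypothetical caller sketch, not part of this patch (my_wq, my_work and my_work_fn are illustrative names):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;		/* illustrative */
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* runs in process context on the workqueue's worker thread */
}

static int __init my_init(void)
{
	/* reaches __create_workqueue_key() via the wrapper macros */
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);
	return 0;
}
module_init(my_init);

Using create_workqueue() instead would take the multi-threaded branch above, creating one worker thread per online CPU.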
+
+static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
+{
+	/*
+	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
+	 * cpu_add_remove_lock protects cwq->thread.
+	 */
+	if (cwq->thread == NULL)
+		return;
+
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
+
+	flush_cpu_workqueue(cwq);
+	/*
+	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
+	 * a concurrent flush_workqueue() can insert a barrier after us.
+	 * However, in that case run_workqueue() won't return and check
+	 * kthread_should_stop() until it flushes all work_struct's.
+	 * When ->worklist becomes empty it is safe to exit because no
+	 * more work_structs can be queued on this cwq: flush_workqueue
+	 * checks list_empty(), and a "normal" queue_work() can't use
+	 * a dead CPU.
+	 */
+	kthread_stop(cwq->thread);
+	cwq->thread = NULL;
+}
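The lock_map_acquire()/lock_map_release() pair above takes no real lock; it only tells lockdep that this path may wait for every work item on the queue, so holding a lock across teardown that a work function also takes shows up as a reported inversion instead of a silent deadlock. A hedged, self-contained sketch of the caller bug this catches, not part of this patch (my_mutex, my_wq and my_locked_work_fn are illustrative names):

#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(my_mutex);			/* illustrative */
static struct workqueue_struct *my_wq;

static void my_locked_work_fn(struct work_struct *work)
{
	mutex_lock(&my_mutex);	/* lockdep learns: wq->lockdep_map -> my_mutex */
	/* ... */
	mutex_unlock(&my_mutex);
}

static void my_buggy_teardown(void)
{
	mutex_lock(&my_mutex);
	/*
	 * destroy_workqueue() ends up in cleanup_workqueue_thread(), which
	 * acquires the dummy wq->lockdep_map while my_mutex is held, i.e.
	 * the opposite order, so lockdep can warn even if
	 * my_locked_work_fn() happens not to be queued at this moment.
	 */
	destroy_workqueue(my_wq);
	mutex_unlock(&my_mutex);
}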
+
+/**
+ * destroy_workqueue - safely terminate a workqueue
+ * @wq: target workqueue
+ *
+ * Safely destroy a workqueue. All work currently pending will be done first.
+ */
+void destroy_workqueue(struct workqueue_struct *wq)
+{
+	const cpumask_t *cpu_map = wq_cpu_map(wq);
+	int cpu;
+
+	cpu_maps_update_begin();
+	spin_lock(&workqueue_lock);
+	list_del(&wq->list);
+	spin_unlock(&workqueue_lock);
+
+	for_each_cpu_mask_nr(cpu, *cpu_map)
+		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
+	cpu_maps_update_done();
+
+	free_percpu(wq->cpu_wq);
+	kfree(wq);
+}
+EXPORT_SYMBOL_GPL(destroy_workqueue);
+
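Teardown side of the hypothetical sketch above: the caller's only obligation is that nothing queues new work once destroy_workqueue() is called; work already queued is run first by the flush in cleanup_workqueue_thread(), so no separate flush_workqueue() is needed here.

static void __exit my_exit(void)
{
	/*
	 * Nothing may call queue_work(my_wq, ...) past this point.
	 * destroy_workqueue() runs whatever is still pending, stops the
	 * worker thread(s) and frees the per-cpu structures.
	 */
	destroy_workqueue(my_wq);
	my_wq = NULL;
}
module_exit(my_exit);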