+	return is_wq_single_threaded(wq)
+		? cpu_singlethread_map : cpu_populated_map;
+}
+
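+/*
+ * Return the cpu_workqueue_struct for @wq on @cpu.  A single-threaded
+ * workqueue has only one cpu_workqueue_struct, so every lookup is
+ * redirected to singlethread_cpu.
+ */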
+static
+struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
+{
+	if (unlikely(is_wq_single_threaded(wq)))
+		cpu = singlethread_cpu;
+	return per_cpu_ptr(wq->cpu_wq, cpu);
+}
+
+/*
+ * Set the workqueue on which a work item is to be run
+ * - Must *only* be called if the pending flag is set
+ */
+static inline void set_wq_data(struct work_struct *work,
+				struct cpu_workqueue_struct *cwq)
+{
+	unsigned long new;
+
+	BUG_ON(!work_pending(work));
+
+	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
+	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
+	atomic_long_set(&work->data, new);
+}
+
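+/*
+ * Retrieve the cpu_workqueue_struct stashed in work->data by
+ * set_wq_data(); the flag bits are masked off.
+ */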
+static inline
+struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
+{
+	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
+}
+
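+/* Tracepoint: fires each time a work item is inserted on a cwq. */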
+DEFINE_TRACE(workqueue_insertion);
+
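+/* Insert @work on @head and wake a worker; called with cwq->lock held. */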
+static void insert_work(struct cpu_workqueue_struct *cwq,
+			struct work_struct *work, struct list_head *head)
+{
+	trace_workqueue_insertion(cwq->thread, work);
+
+	set_wq_data(work, cwq);
+	/*
+	 * Ensure that we get the right work->data if we see the
+	 * result of list_add() below; this smp_wmb() pairs with the
+	 * smp_rmb() in try_to_grab_pending().
+	 */
+	smp_wmb();
+	list_add_tail(&work->entry, head);
+	wake_up(&cwq->more_work);