diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ddad63f..77dabbf 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -229,6 +229,16 @@ static inline void set_wq_data(struct work_struct *work,
        atomic_long_set(&work->data, new);
 }
 
+/*
+ * Clear WORK_STRUCT_PENDING and the pointer to the workqueue on which
+ * the work was queued, preserving only the WORK_STRUCT_STATIC bit that
+ * debugobjects needs for statically allocated work items.
+ */
+static inline void clear_wq_data(struct work_struct *work)
+{
+       unsigned long flags = *work_data_bits(work) &
+                               (1UL << WORK_STRUCT_STATIC);
+       atomic_long_set(&work->data, flags);
+}
+
 static inline
 struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 {
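
Annotation: clear_wq_data() resets work->data down to the one flag bit worth
preserving. A standalone sketch of the bit-packing idea behind it (userspace
C, not kernel code; the bit positions mirror the WORK_STRUCT_* constants of
this era, and the pointer value is made up):

    #include <stdio.h>

    #define WORK_STRUCT_PENDING  0  /* work item pending execution */
    #define WORK_STRUCT_STATIC   1  /* static initializer (debugobjects) */

    int main(void)
    {
            /* low bits carry flags; the rest points at the
             * cpu_workqueue_struct the item was queued on */
            unsigned long data = 0xdead0000UL
                               | (1UL << WORK_STRUCT_PENDING)
                               | (1UL << WORK_STRUCT_STATIC);

            /* what clear_wq_data() does: drop the pointer and the
             * PENDING bit, keep only STATIC so debugobjects still
             * recognizes statically allocated work items */
            data &= (1UL << WORK_STRUCT_STATIC);

            printf("data after clear: %#lx\n", data);  /* prints 0x2 */
            return 0;
    }
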
@@ -671,7 +681,7 @@ static int __cancel_work_timer(struct work_struct *work,
                wait_on_work(work);
        } while (unlikely(ret < 0));
 
-       work_clear_pending(work);
+       clear_wq_data(work);
        return ret;
 }
 
@@ -774,7 +784,7 @@ void flush_delayed_work(struct delayed_work *dwork)
 {
        if (del_timer_sync(&dwork->timer)) {
                struct cpu_workqueue_struct *cwq;
-               cwq = wq_per_cpu(keventd_wq, get_cpu());
+               cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
                __queue_work(cwq, &dwork->work);
                put_cpu();
        }
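
Annotation: the fix above matters for delayed work queued on a private
workqueue: flush_delayed_work() must requeue onto the workqueue recorded in
the work's data word, not unconditionally onto keventd_wq. A minimal driver
sketch (my_wq, my_dwork and friends are hypothetical names), using only APIs
of this era:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;   /* hypothetical */
    static struct delayed_work my_dwork;

    static void my_work_fn(struct work_struct *work)
    {
            /* ... periodic housekeeping ... */
    }

    static int __init my_init(void)
    {
            my_wq = create_workqueue("my_wq");
            if (!my_wq)
                    return -ENOMEM;
            INIT_DELAYED_WORK(&my_dwork, my_work_fn);
            queue_delayed_work(my_wq, &my_dwork, HZ);
            return 0;
    }

    static void __exit my_exit(void)
    {
            /*
             * With this patch, a still-pending my_dwork is pushed onto
             * my_wq (looked up via get_wq_data()); previously it was
             * always pushed onto keventd_wq.
             */
            flush_delayed_work(&my_dwork);
            destroy_workqueue(my_wq);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");
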
@@ -817,36 +827,58 @@ int schedule_on_each_cpu(work_func_t func)
        if (!works)
                return -ENOMEM;
 
+       get_online_cpus();
+
        /*
-        * when running in keventd don't schedule a work item on itself.
-        * Can just call directly because the work queue is already bound.
-        * This also is faster.
-        * Make this a generic parameter for other workqueues?
+        * When running in keventd don't schedule a work item on
+        * itself.  Can just call directly because the work queue is
+        * already bound.  This also is faster.
         */
-       if (current_is_keventd()) {
+       if (current_is_keventd())
                orig = raw_smp_processor_id();
-               INIT_WORK(per_cpu_ptr(works, orig), func);
-               func(per_cpu_ptr(works, orig));
-       }
 
-       get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
-               if (cpu == orig)
-                       continue;
                INIT_WORK(work, func);
-               schedule_work_on(cpu, work);
-       }
-       for_each_online_cpu(cpu) {
                if (cpu != orig)
-                       flush_work(per_cpu_ptr(works, cpu));
+                       schedule_work_on(cpu, work);
        }
+       if (orig >= 0)
+               func(per_cpu_ptr(works, orig));
+
+       for_each_online_cpu(cpu)
+               flush_work(per_cpu_ptr(works, cpu));
+
        put_online_cpus();
        free_percpu(works);
        return 0;
 }
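
Annotation: a usage sketch of the reworked function (names hypothetical).
schedule_on_each_cpu() now kicks off the remote work items first, runs the
local CPU's callback inline when the caller is keventd (the only case where
orig >= 0), and then flushes everything:

    #include <linux/percpu.h>
    #include <linux/workqueue.h>

    static DEFINE_PER_CPU(unsigned long, my_stat);  /* hypothetical */

    /* runs once on every online CPU, in that CPU's keventd thread */
    static void my_reset_stat(struct work_struct *unused)
    {
            __get_cpu_var(my_stat) = 0;
    }

    static int my_reset_all(void)
    {
            /* blocks until the callback has completed on all CPUs */
            return schedule_on_each_cpu(my_reset_stat);
    }
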
 
+/**
+ * flush_scheduled_work - ensure that any scheduled work has run to completion.
+ *
+ * Forces execution of the kernel-global workqueue and blocks until its
+ * completion.
+ *
+ * Think twice before calling this function!  It's very easy to get into
+ * trouble if you don't take great care.  Either of the following situations
+ * will lead to deadlock:
+ *
+ *     One of the work items currently on the workqueue needs to acquire
+ *     a lock held by your code or its caller.
+ *
+ *     Your code is running in the context of a work routine.
+ *
+ * They will be detected by lockdep when they occur, but the first might not
+ * occur very often.  It depends on what work items are on the workqueue and
+ * what locks they need, which you have no control over.
+ *
+ * In most situations flushing the entire workqueue is overkill; you merely
+ * need to know that a particular work item isn't queued and isn't running.
+ * In such cases you should use cancel_delayed_work_sync() or
+ * cancel_work_sync() instead.
+ */
 void flush_scheduled_work(void)
 {
        flush_workqueue(keventd_wq);
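
Annotation: a sketch of the first deadlock the new comment warns about (all
names hypothetical): the work item wants my_mutex, while the thread holding
my_mutex sits in flush_scheduled_work() waiting for that very item to finish.

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    static DEFINE_MUTEX(my_mutex);

    static void my_work_fn(struct work_struct *work)
    {
            mutex_lock(&my_mutex);          /* blocks: the flusher holds it */
            /* ... */
            mutex_unlock(&my_mutex);
    }
    static DECLARE_WORK(my_work, my_work_fn);

    static void broken_teardown(void)
    {
            mutex_lock(&my_mutex);
            flush_scheduled_work();         /* deadlock: waits on my_work_fn */
            mutex_unlock(&my_mutex);

            /* prefer cancel_work_sync(&my_work), called outside the
             * lock, which only waits for this one item */
    }
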