diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2d..77dabbf 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,6 +68,116 @@ struct workqueue_struct {
 #endif
 };
 
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+
+static struct debug_obj_descr work_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int work_fixup_init(void *addr, enum debug_obj_state state)
+{
+       struct work_struct *work = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               cancel_work_sync(work);
+               debug_object_init(work, &work_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int work_fixup_activate(void *addr, enum debug_obj_state state)
+{
+       struct work_struct *work = addr;
+
+       switch (state) {
+
+       case ODEBUG_STATE_NOTAVAILABLE:
+               /*
+                * This is not really a fixup. The work struct was
+                * statically initialized. We just make sure that it
+                * is tracked in the object tracker.
+                */
+               if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
+                       debug_object_init(work, &work_debug_descr);
+                       debug_object_activate(work, &work_debug_descr);
+                       return 0;
+               }
+               WARN_ON_ONCE(1);
+               return 0;
+
+       case ODEBUG_STATE_ACTIVE:
+               WARN_ON(1);
+
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int work_fixup_free(void *addr, enum debug_obj_state state)
+{
+       struct work_struct *work = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               cancel_work_sync(work);
+               debug_object_free(work, &work_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static struct debug_obj_descr work_debug_descr = {
+       .name           = "work_struct",
+       .fixup_init     = work_fixup_init,
+       .fixup_activate = work_fixup_activate,
+       .fixup_free     = work_fixup_free,
+};
+
+static inline void debug_work_activate(struct work_struct *work)
+{
+       debug_object_activate(work, &work_debug_descr);
+}
+
+static inline void debug_work_deactivate(struct work_struct *work)
+{
+       debug_object_deactivate(work, &work_debug_descr);
+}
+
+void __init_work(struct work_struct *work, int onstack)
+{
+       if (onstack)
+               debug_object_init_on_stack(work, &work_debug_descr);
+       else
+               debug_object_init(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(__init_work);
+
+void destroy_work_on_stack(struct work_struct *work)
+{
+       debug_object_free(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_work_on_stack);
+
+#else
+static inline void debug_work_activate(struct work_struct *work) { }
+static inline void debug_work_deactivate(struct work_struct *work) { }
+#endif
+
 /* Serializes the accesses to the list of workqueues. */
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
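
The debugobjects hooks added above only come into play when a work item is misused. Below is a minimal sketch, for illustration only and not part of the patch, of the two situations they are written for; demo_fn, demo_work and demo_static_work are invented names, and it assumes the header side of the change makes the static initializer set WORK_STRUCT_STATIC, which the test_bit() in work_fixup_activate() implies but which is not shown in this hunk.

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	static void demo_fn(struct work_struct *work)
	{
		pr_info("demo work ran\n");
	}

	/*
	 * Statically initialized work: the first schedule_work() finds the
	 * object in ODEBUG_STATE_NOTAVAILABLE, sees WORK_STRUCT_STATIC and
	 * quietly registers it instead of hitting the WARN_ON_ONCE().
	 */
	static DECLARE_WORK(demo_static_work, demo_fn);

	static struct work_struct demo_work;

	static void demo_misuse(void)
	{
		schedule_work(&demo_static_work);

		INIT_WORK(&demo_work, demo_fn);
		schedule_work(&demo_work);	/* object is now ODEBUG_STATE_ACTIVE */

		/*
		 * Re-initializing a work item that is still queued is the bug
		 * class work_fixup_init() handles: debugobjects reports it,
		 * cancel_work_sync() flushes the pending item, and the
		 * re-initialization then proceeds on an idle object.
		 */
		INIT_WORK(&demo_work, demo_fn);
	}
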
@@ -119,6 +229,16 @@ static inline void set_wq_data(struct work_struct *work,
        atomic_long_set(&work->data, new);
 }
 
+/*
+ * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
+ */
+static inline void clear_wq_data(struct work_struct *work)
+{
+       unsigned long flags = *work_data_bits(work) &
+                               (1UL << WORK_STRUCT_STATIC);
+       atomic_long_set(&work->data, flags);
+}
+
 static inline
 struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 {
@@ -145,6 +265,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 {
        unsigned long flags;
 
+       debug_work_activate(work);
        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, &cwq->worklist);
        spin_unlock_irqrestore(&cwq->lock, flags);
@@ -280,6 +401,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
                trace_workqueue_execution(cwq->thread, work);
+               debug_work_deactivate(work);
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);
@@ -350,11 +472,18 @@ static void wq_barrier_func(struct work_struct *work)
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                        struct wq_barrier *barr, struct list_head *head)
 {
-       INIT_WORK(&barr->work, wq_barrier_func);
+       /*
+        * debugobject calls are safe here even with cwq->lock locked
+        * as we know for sure that this will not trigger any of the
+        * checks and call back into the fixup functions where we
+        * might deadlock.
+        */
+       INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
        init_completion(&barr->done);
 
+       debug_work_activate(&barr->work);
        insert_work(cwq, &barr->work, head);
 }
 
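
The barrier above shows the general pattern for any work item that lives on the stack: initialize it with INIT_WORK_ON_STACK(), wait for it, then report it gone with destroy_work_on_stack(). Here is a hedged sketch of a caller doing the same outside the flush path; copy_fn() and wait_for_copy() are invented names, and destroy_work_on_stack() is assumed to compile away when CONFIG_DEBUG_OBJECTS_WORK is off (the header hunk is not shown here).

	#include <linux/workqueue.h>

	static void copy_fn(struct work_struct *work)
	{
		/* ... perform the deferred operation ... */
	}

	static void wait_for_copy(void)
	{
		struct work_struct copy_work;

		INIT_WORK_ON_STACK(&copy_work, copy_fn);	/* register the on-stack object */
		schedule_work(&copy_work);
		flush_work(&copy_work);			/* copy_fn() has completed at this point */
		destroy_work_on_stack(&copy_work);	/* remove it from the tracker before it goes out of scope */
	}
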
@@ -372,8 +501,10 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
        }
        spin_unlock_irq(&cwq->lock);
 
-       if (active)
+       if (active) {
                wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+       }
 
        return active;
 }
@@ -451,6 +582,7 @@ out:
                return 0;
 
        wait_for_completion(&barr.done);
+       destroy_work_on_stack(&barr.work);
        return 1;
 }
 EXPORT_SYMBOL_GPL(flush_work);
@@ -485,6 +617,7 @@ static int try_to_grab_pending(struct work_struct *work)
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
+                       debug_work_deactivate(work);
                        list_del_init(&work->entry);
                        ret = 1;
                }
@@ -507,8 +640,10 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
        }
        spin_unlock_irq(&cwq->lock);
 
-       if (unlikely(running))
+       if (unlikely(running)) {
                wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+       }
 }
 
 static void wait_on_work(struct work_struct *work)
@@ -546,7 +681,7 @@ static int __cancel_work_timer(struct work_struct *work,
                wait_on_work(work);
        } while (unlikely(ret < 0));
 
-       work_clear_pending(work);
+       clear_wq_data(work);
        return ret;
 }
 
@@ -640,6 +775,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a delayed_work's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer)) {
+               struct cpu_workqueue_struct *cwq;
+               cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
+               __queue_work(cwq, &dwork->work);
+               put_cpu();
+       }
+       flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
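
A hedged usage sketch for the flush_delayed_work() export added above, e.g. in a driver stop path; struct demo_nic and its stats_work are invented for illustration.

	#include <linux/workqueue.h>

	struct demo_nic {
		struct delayed_work stats_work;
	};

	static void demo_nic_stop(struct demo_nic *nic)
	{
		/*
		 * If the timer has not fired yet, flush_delayed_work() queues
		 * the work immediately and then waits for the callback, so the
		 * final stats update runs instead of being silently dropped.
		 */
		flush_delayed_work(&nic->stats_work);
	}
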
@@ -667,6 +820,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
+       int orig = -1;
        struct work_struct *works;
 
        works = alloc_percpu(struct work_struct);
@@ -674,19 +828,57 @@ int schedule_on_each_cpu(work_func_t func)
                return -ENOMEM;
 
        get_online_cpus();
+
+       /*
+        * When running in keventd, don't schedule a work item on
+        * keventd itself; just call the function directly, since the
+        * workqueue is already bound to this CPU.  This is also faster.
+        */
+       if (current_is_keventd())
+               orig = raw_smp_processor_id();
+
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
                INIT_WORK(work, func);
-               schedule_work_on(cpu, work);
+               if (cpu != orig)
+                       schedule_work_on(cpu, work);
        }
+       if (orig >= 0)
+               func(per_cpu_ptr(works, orig));
+
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
+
        put_online_cpus();
        free_percpu(works);
        return 0;
 }
 
+/**
+ * flush_scheduled_work - ensure that any scheduled work has run to completion.
+ *
+ * Forces execution of the kernel-global workqueue and blocks until its
+ * completion.
+ *
+ * Think twice before calling this function!  It's very easy to get into
+ * trouble if you don't take great care.  Either of the following situations
+ * will lead to deadlock:
+ *
+ *     One of the work items currently on the workqueue needs to acquire
+ *     a lock held by your code or its caller.
+ *
+ *     Your code is running in the context of a work routine.
+ *
+ * They will be detected by lockdep when they occur, but the first might not
+ * occur very often.  It depends on what work items are on the workqueue and
+ * what locks they need, which you have no control over.
+ *
+ * In most situations flushing the entire workqueue is overkill; you merely
+ * need to know that a particular work item isn't queued and isn't running.
+ * In such cases you should use cancel_delayed_work_sync() or
+ * cancel_work_sync() instead.
+ */
 void flush_scheduled_work(void)
 {
        flush_workqueue(keventd_wq);
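
As the documentation above suggests, most callers only need to wait for their own work items rather than for everything on the global workqueue. A hedged sketch of that narrower pattern follows; struct demo_dev and its members are invented for illustration.

	#include <linux/workqueue.h>

	struct demo_dev {
		struct work_struct reset_work;
		struct delayed_work poll_work;
	};

	static void demo_dev_teardown(struct demo_dev *dev)
	{
		/*
		 * Waiting on just these items cannot deadlock on unrelated
		 * work sitting in the global queue, unlike
		 * flush_scheduled_work().
		 */
		cancel_work_sync(&dev->reset_work);
		cancel_delayed_work_sync(&dev->poll_work);
	}
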