diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1f0c509..67e526b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,6 +33,8 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
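
Not part of the patch, but for context: defining CREATE_TRACE_POINTS before
including a trace/events/ header is what makes this one translation unit emit
the tracepoint bodies; every other file includes the same header without the
define and only sees the declarations.  A minimal sketch of the pattern, using
a made-up "myevents" header and event rather than the real workqueue
definitions:

/* include/trace/events/myevents.h -- hypothetical example header */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM myevents

#if !defined(_TRACE_MYEVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MYEVENTS_H

#include <linux/tracepoint.h>

TRACE_EVENT(myevents_item_queued,
        TP_PROTO(int cpu),
        TP_ARGS(cpu),
        TP_STRUCT__entry(
                __field(int, cpu)
        ),
        TP_fast_assign(
                __entry->cpu = cpu;
        ),
        TP_printk("cpu=%d", __entry->cpu)
);

#endif /* _TRACE_MYEVENTS_H */

/* this include must stay outside the guard above */
#include <trace/define_trace.h>

The instrumented .c file then does exactly what the hunk above does for
workqueue.c: define CREATE_TRACE_POINTS, include the header, and call
trace_myevents_item_queued(cpu) at the interesting spots.
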
@@ -48,8 +50,6 @@ struct cpu_workqueue_struct {
 
        struct workqueue_struct *wq;
        struct task_struct *thread;
-
-       int run_depth;          /* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
 
 /*
@@ -128,6 +128,8 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head)
 {
+       trace_workqueue_insertion(cwq->thread, work);
+
        set_wq_data(work, cwq);
        /*
         * Ensure that we get the right work->data if we see the
@@ -262,13 +264,6 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
        spin_lock_irq(&cwq->lock);
-       cwq->run_depth++;
-       if (cwq->run_depth > 3) {
-               /* morton gets to eat his hat */
-               printk("%s: recursion depth exceeded: %d\n",
-                       __func__, cwq->run_depth);
-               dump_stack();
-       }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
@@ -284,7 +279,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                 */
                struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
-
+               trace_workqueue_execution(cwq->thread, work);
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);
@@ -311,7 +306,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
-       cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
 }
 
@@ -323,8 +317,6 @@ static int worker_thread(void *__cwq)
        if (cwq->wq->freezeable)
                set_freezable();
 
-       set_user_nice(current, -5);
-
        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
@@ -368,29 +360,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
-       int active;
-
-       if (cwq->thread == current) {
-               /*
-                * Probably keventd trying to flush its own queue. So simply run
-                * it by hand rather than deadlocking.
-                */
-               run_workqueue(cwq);
-               active = 1;
-       } else {
-               struct wq_barrier barr;
+       int active = 0;
+       struct wq_barrier barr;
 
-               active = 0;
-               spin_lock_irq(&cwq->lock);
-               if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-                       insert_wq_barrier(cwq, &barr, &cwq->worklist);
-                       active = 1;
-               }
-               spin_unlock_irq(&cwq->lock);
+       WARN_ON(cwq->thread == current);
 
-               if (active)
-                       wait_for_completion(&barr.done);
+       spin_lock_irq(&cwq->lock);
+       if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+               insert_wq_barrier(cwq, &barr, &cwq->worklist);
+               active = 1;
        }
+       spin_unlock_irq(&cwq->lock);
+
+       if (active)
+               wait_for_completion(&barr.done);
 
        return active;
 }
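
The flush mechanism itself is untouched: the flusher queues a barrier work
item whose only job is to signal a completion, then sleeps on it.  Stripped of
the cwq bookkeeping, the idiom looks roughly like this (a sketch with
illustrative names, not the kernel's own wq_barrier code):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct flush_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void barrier_func(struct work_struct *work)
{
        struct flush_barrier *b = container_of(work, struct flush_barrier, work);

        complete(&b->done);                     /* wake the flusher */
}

static void flush_example(struct workqueue_struct *wq)
{
        struct flush_barrier b;

        INIT_WORK(&b.work, barrier_func);       /* on-stack barrier item */
        init_completion(&b.done);
        queue_work(wq, &b.work);
        wait_for_completion(&b.done);           /* all earlier work has now run */
}

Seen this way, the new WARN_ON is the whole point: if the flusher is the
workqueue thread itself, the barrier can never execute and the wait deadlocks.
The old code papered over that by recursing into run_workqueue(), which is
exactly the run_depth machinery deleted in the hunks above.
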
@@ -416,7 +399,7 @@ void flush_workqueue(struct workqueue_struct *wq)
        might_sleep();
        lock_map_acquire(&wq->lockdep_map);
        lock_map_release(&wq->lockdep_map);
-       for_each_cpu_mask_nr(cpu, *cpu_map)
+       for_each_cpu(cpu, cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -547,7 +530,7 @@ static void wait_on_work(struct work_struct *work)
        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);
 
-       for_each_cpu_mask_nr(cpu, *cpu_map)
+       for_each_cpu(cpu, cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -615,7 +598,12 @@ static struct workqueue_struct *keventd_wq __read_mostly;
  * schedule_work - put work task in global workqueue
  * @work: job to be done
  *
- * This puts a job in the kernel-global workqueue.
+ * Returns zero if @work was already on the kernel-global workqueue and
+ * non-zero otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
  */
 int schedule_work(struct work_struct *work)
 {
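
A small sketch of the return-value semantics spelled out in the new comment;
my_handler and my_work are made-up names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void my_handler(struct work_struct *work)
{
        /* runs later in the per-CPU events/N kernel thread */
}

static DECLARE_WORK(my_work, my_handler);

static void kick(void)
{
        /*
         * Non-zero: my_work was newly put on the kernel-global queue.
         * Zero: it was already pending and keeps its current position.
         */
        if (!schedule_work(&my_work))
                pr_debug("my_work already queued\n");
}
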
@@ -652,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a dwork_struct's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer)) {
+               struct cpu_workqueue_struct *cwq;
+               cwq = wq_per_cpu(keventd_wq, get_cpu());
+               __queue_work(cwq, &dwork->work);
+               put_cpu();
+       }
+       flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
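
A usage sketch for the new helper; sync_handler and sync_work are
hypothetical:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void sync_handler(struct work_struct *work)
{
        /* write back cached state, rearm timers, etc. */
}

static DECLARE_DELAYED_WORK(sync_work, sync_handler);

static void start_sync(void)
{
        schedule_delayed_work(&sync_work, HZ);  /* run in about a second */
}

static void stop_sync(void)
{
        /*
         * If the timer is still pending the work is queued immediately;
         * either way we return only after sync_handler() has finished.
         */
        flush_delayed_work(&sync_work);
}
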
@@ -679,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
+       int orig = -1;
        struct work_struct *works;
 
        works = alloc_percpu(struct work_struct);
@@ -686,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
                return -ENOMEM;
 
        get_online_cpus();
+
+       /*
+        * When running in keventd don't schedule a work item on
+        * itself.  Can just call directly because the work queue is
+        * already bound.  This also is faster.
+        */
+       if (current_is_keventd())
+               orig = raw_smp_processor_id();
+
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
                INIT_WORK(work, func);
-               schedule_work_on(cpu, work);
+               if (cpu != orig)
+                       schedule_work_on(cpu, work);
        }
+       if (orig >= 0)
+               func(per_cpu_ptr(works, orig));
+
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
+
        put_online_cpus();
        free_percpu(works);
        return 0;
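
The keventd special case above only changes where the calling thread's own
callback runs; the interface is unchanged.  A hedged usage sketch, with
report_cpu as an invented work function:

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void report_cpu(struct work_struct *unused)
{
        /* runs once on every online CPU, in that CPU's events thread */
        pr_info("hello from cpu %d\n", smp_processor_id());
}

static void run_everywhere(void)
{
        int ret = schedule_on_each_cpu(report_cpu);     /* blocks until all done */

        if (ret)
                pr_err("schedule_on_each_cpu: %d\n", ret);
}
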
@@ -787,6 +808,8 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
        cwq->thread = p;
 
+       trace_workqueue_creation(cwq->thread, cpu);
+
        return 0;
 }
 
@@ -891,6 +914,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
+       trace_workqueue_destruction(cwq->thread);
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
 }
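
For orientation, the creation and destruction tracepoints added above fire
once per workqueue thread, i.e. once per CPU for an ordinary multithreaded
queue.  A minimal lifecycle sketch (module, queue name and work item are all
hypothetical):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_func(struct work_struct *work)
{
        /* deferred processing goes here */
}

static DECLARE_WORK(my_item, my_func);

static int __init my_init(void)
{
        my_wq = create_workqueue("my_wq");      /* workqueue_creation per thread */
        if (!my_wq)
                return -ENOMEM;
        queue_work(my_wq, &my_item);
        return 0;
}

static void __exit my_exit(void)
{
        flush_workqueue(my_wq);
        destroy_workqueue(my_wq);               /* workqueue_destruction per thread */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
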
@@ -911,7 +935,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);
 
-       for_each_cpu_mask_nr(cpu, *cpu_map)
+       for_each_cpu(cpu, cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        cpu_maps_update_done();
 
@@ -971,20 +995,20 @@ undo:
 }
 
 #ifdef CONFIG_SMP
-static struct workqueue_struct *work_on_cpu_wq __read_mostly;
 
 struct work_for_cpu {
-       struct work_struct work;
+       struct completion completion;
        long (*fn)(void *);
        void *arg;
        long ret;
 };
 
-static void do_work_for_cpu(struct work_struct *w)
+static int do_work_for_cpu(void *_wfc)
 {
-       struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
-
+       struct work_for_cpu *wfc = _wfc;
        wfc->ret = wfc->fn(wfc->arg);
+       complete(&wfc->completion);
+       return 0;
 }
 
 /**
@@ -995,17 +1019,23 @@ static void do_work_for_cpu(struct work_struct *w)
  *
  * This will return the value @fn returns.
  * It is up to the caller to ensure that the cpu doesn't go offline.
+ * The caller must not hold any locks which would prevent @fn from completing.
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-       struct work_for_cpu wfc;
-
-       INIT_WORK(&wfc.work, do_work_for_cpu);
-       wfc.fn = fn;
-       wfc.arg = arg;
-       queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
-       flush_work(&wfc.work);
-
+       struct task_struct *sub_thread;
+       struct work_for_cpu wfc = {
+               .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
+               .fn = fn,
+               .arg = arg,
+       };
+
+       sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
+       if (IS_ERR(sub_thread))
+               return PTR_ERR(sub_thread);
+       kthread_bind(sub_thread, cpu);
+       wake_up_process(sub_thread);
+       wait_for_completion(&wfc.completion);
        return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
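
A sketch of calling the reworked helper; per the new comment the caller must
not hold locks that would prevent fn from completing.  The whoami function and
the choice of CPU 1 are purely illustrative:

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static long whoami(void *arg)
{
        /* runs on the kthread bound to the requested CPU */
        return raw_smp_processor_id();
}

static void example(void)
{
        long ret;

        get_online_cpus();                      /* keep the CPU from going away */
        ret = work_on_cpu(1, whoami, NULL);     /* assumes CPU 1 is online */
        put_online_cpus();

        if (ret < 0)                            /* kthread_create() failed */
                pr_err("work_on_cpu: %ld\n", ret);
        else
                pr_info("ran on cpu %ld\n", ret);
}
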
@@ -1021,8 +1051,4 @@ void __init init_workqueues(void)
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
-#ifdef CONFIG_SMP
-       work_on_cpu_wq = create_workqueue("work_on_cpu");
-       BUG_ON(!work_on_cpu_wq);
-#endif
 }