diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b6b966c..67e526b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,7 +33,8 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
-#include <trace/workqueue.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
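
For context: the switch to <trace/events/workqueue.h> follows the kernel's
TRACE_EVENT() convention, in which exactly one compilation unit defines
CREATE_TRACE_POINTS before including the event header; that include then
expands the event declarations into real tracepoint definitions, which is
why the scattered DEFINE_TRACE() calls are removed in the hunks below. A
minimal sketch of the pattern, with a hypothetical event name:

    /* include/trace/events/foo.h -- hypothetical event header */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM foo

    #if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_FOO_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(foo_queued,
            TP_PROTO(int cpu),
            TP_ARGS(cpu),
            TP_STRUCT__entry(__field(int, cpu)),
            TP_fast_assign(__entry->cpu = cpu;),
            TP_printk("cpu=%d", __entry->cpu)
    );

    #endif /* _TRACE_FOO_H */
    /* this include must stay outside the multi-read protection */
    #include <trace/define_trace.h>

    /* foo.c -- the one file that instantiates the tracepoints */
    #define CREATE_TRACE_POINTS
    #include <trace/events/foo.h>

    static void foo_enqueue(int cpu)
    {
            trace_foo_queued(cpu);  /* emits the event when tracing is on */
    }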
@@ -124,8 +125,6 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
-DEFINE_TRACE(workqueue_insertion);
-
 static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head)
 {
@@ -262,8 +261,6 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
-DEFINE_TRACE(workqueue_execution);
-
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
        spin_lock_irq(&cwq->lock);
@@ -320,8 +317,6 @@ static int worker_thread(void *__cwq)
        if (cwq->wq->freezeable)
                set_freezable();
 
-       set_user_nice(current, -5);
-
        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
@@ -603,7 +598,12 @@ static struct workqueue_struct *keventd_wq __read_mostly;
  * schedule_work - put work task in global workqueue
  * @work: job to be done
  *
- * This puts a job in the kernel-global workqueue.
+ * Returns zero if @work was already on the kernel-global workqueue and
+ * non-zero otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
  */
 int schedule_work(struct work_struct *work)
 {
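
A usage sketch of the return convention documented above (hypothetical
driver code, not part of this patch):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    static void my_handler(struct work_struct *work)
    {
            /* runs later, in process context, on the kernel-global workqueue */
    }

    static DECLARE_WORK(my_work, my_handler);

    static void kick(void)
    {
            /* non-zero: newly queued; zero: it was already pending */
            if (!schedule_work(&my_work))
                    pr_debug("my_work was already queued\n");
    }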
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a delayed_work's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer)) {
+               struct cpu_workqueue_struct *cwq;
+               cwq = wq_per_cpu(keventd_wq, get_cpu());
+               __queue_work(cwq, &dwork->work);
+               put_cpu();
+       }
+       flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
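
A hypothetical teardown path showing how the new helper differs from a
plain cancel: a still-pending item is pulled forward and run rather than
discarded, and the call returns only once the callback has finished.

    #include <linux/workqueue.h>

    static void my_poll(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_dwork, my_poll);

    static void my_poll(struct work_struct *work)
    {
            /* periodic work; may re-arm itself via schedule_delayed_work() */
    }

    static void my_shutdown(void)
    {
            /* runs a pending my_poll() now instead of dropping it */
            flush_delayed_work(&my_dwork);
    }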
@@ -667,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
+       int orig = -1;
        struct work_struct *works;
 
        works = alloc_percpu(struct work_struct);
@@ -674,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
                return -ENOMEM;
 
        get_online_cpus();
+
+       /*
+        * When running in keventd, don't schedule a work item on
+        * itself; call the function directly, since the workqueue
+        * is already bound to this CPU.  This is also faster.
+        */
+       if (current_is_keventd())
+               orig = raw_smp_processor_id();
+
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
                INIT_WORK(work, func);
-               schedule_work_on(cpu, work);
+               if (cpu != orig)
+                       schedule_work_on(cpu, work);
        }
+       if (orig >= 0)
+               func(per_cpu_ptr(works, orig));
+
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
+
        put_online_cpus();
        free_percpu(works);
        return 0;
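
A hedged usage sketch: schedule_on_each_cpu() runs @func once on every
online CPU in that CPU's keventd thread and returns only after all
instances have finished; with the change above, a caller that is itself
keventd executes its own CPU's instance directly instead of queueing on,
and then flushing, its own thread.

    #include <linux/percpu.h>
    #include <linux/workqueue.h>

    static DEFINE_PER_CPU(unsigned long, my_counter);

    /* executed once on each online CPU, in that CPU's keventd thread */
    static void bump_counter(struct work_struct *unused)
    {
            __get_cpu_var(my_counter)++;
    }

    static int bump_all_cpus(void)
    {
            /* 0 on success, -ENOMEM if the per-cpu work array allocation fails */
            return schedule_on_each_cpu(bump_counter);
    }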
@@ -753,8 +786,6 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
        return cwq;
 }
 
-DEFINE_TRACE(workqueue_creation);
-
 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -860,8 +891,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
-DEFINE_TRACE(workqueue_destruction);
-
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
        /*
@@ -966,20 +995,20 @@ undo:
 }
 
 #ifdef CONFIG_SMP
-static struct workqueue_struct *work_on_cpu_wq __read_mostly;
 
 struct work_for_cpu {
-       struct work_struct work;
+       struct completion completion;
        long (*fn)(void *);
        void *arg;
        long ret;
 };
 
-static void do_work_for_cpu(struct work_struct *w)
+static int do_work_for_cpu(void *_wfc)
 {
-       struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
-
+       struct work_for_cpu *wfc = _wfc;
        wfc->ret = wfc->fn(wfc->arg);
+       complete(&wfc->completion);
+       return 0;
 }
 
 /**
@@ -990,17 +1019,23 @@ static void do_work_for_cpu(struct work_struct *w)
  *
  * This will return the value @fn returns.
  * It is up to the caller to ensure that the cpu doesn't go offline.
+ * The caller must not hold any locks which would prevent @fn from completing.
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-       struct work_for_cpu wfc;
-
-       INIT_WORK(&wfc.work, do_work_for_cpu);
-       wfc.fn = fn;
-       wfc.arg = arg;
-       queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
-       flush_work(&wfc.work);
-
+       struct task_struct *sub_thread;
+       struct work_for_cpu wfc = {
+               .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
+               .fn = fn,
+               .arg = arg,
+       };
+
+       sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
+       if (IS_ERR(sub_thread))
+               return PTR_ERR(sub_thread);
+       kthread_bind(sub_thread, cpu);
+       wake_up_process(sub_thread);
+       wait_for_completion(&wfc.completion);
        return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
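
A usage sketch under the new scheme (hypothetical caller): @fn now runs in
a freshly created kernel thread bound to @cpu rather than on a dedicated
workqueue, so the caller's obligations are exactly the two documented
above: keep the CPU online and hold no locks that @fn might need.

    #include <linux/cpu.h>
    #include <linux/errno.h>
    #include <linux/smp.h>
    #include <linux/workqueue.h>

    /* runs on whichever CPU was handed to work_on_cpu() */
    static long who_am_i(void *unused)
    {
            return (long)raw_smp_processor_id();
    }

    static long query_cpu(unsigned int cpu)
    {
            long ret;

            get_online_cpus();      /* pin @cpu online for the duration */
            ret = cpu_online(cpu) ? work_on_cpu(cpu, who_am_i, NULL) : -ENODEV;
            put_online_cpus();
            return ret;
    }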
@@ -1016,8 +1051,4 @@ void __init init_workqueues(void)
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
-#ifdef CONFIG_SMP
-       work_on_cpu_wq = create_workqueue("work_on_cpu");
-       BUG_ON(!work_on_cpu_wq);
-#endif
 }