tracing/kprobes: Fix field creation's bad error handling
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3003eca..1232814 100644
@@ -33,7 +33,8 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
-#include <trace/workqueue.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
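
For context on the hunk above: the old DEFINE_TRACE() calls scattered through this file (removed in later hunks) are superseded by the CREATE_TRACE_POINTS convention, where exactly one compilation unit defines the macro before including the trace/events/ header, and that single include expands every TRACE_EVENT() declared there into a real tracepoint definition. A minimal sketch of the pattern, using a hypothetical "mysubsys" event rather than the real workqueue events:

    /* include/trace/events/mysubsys.h -- hypothetical events header */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM mysubsys

    #if !defined(_TRACE_MYSUBSYS_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_MYSUBSYS_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(mysubsys_item_queued,
            TP_PROTO(int cpu),
            TP_ARGS(cpu),
            TP_STRUCT__entry(__field(int, cpu)),
            TP_fast_assign(__entry->cpu = cpu;),
            TP_printk("cpu=%d", __entry->cpu)
    );

    #endif /* _TRACE_MYSUBSYS_H */
    #include <trace/define_trace.h>

    /* mysubsys.c -- the one and only file that instantiates the tracepoints */
    #define CREATE_TRACE_POINTS
    #include <trace/events/mysubsys.h>

    /* callers anywhere simply invoke the generated stub */
    trace_mysubsys_item_queued(raw_smp_processor_id());

Call sites such as trace_workqueue_insertion() are untouched by this change; only the per-file DEFINE_TRACE() boilerplate disappears.
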
@@ -49,8 +50,6 @@ struct cpu_workqueue_struct {
 
        struct workqueue_struct *wq;
        struct task_struct *thread;
-
-       int run_depth;          /* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
 
 /*
@@ -126,8 +125,6 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
-DEFINE_TRACE(workqueue_insertion);
-
 static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head)
 {
@@ -264,18 +261,9 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
-DEFINE_TRACE(workqueue_execution);
-
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
        spin_lock_irq(&cwq->lock);
-       cwq->run_depth++;
-       if (cwq->run_depth > 3) {
-               /* morton gets to eat his hat */
-               printk("%s: recursion depth exceeded: %d\n",
-                       __func__, cwq->run_depth);
-               dump_stack();
-       }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
@@ -318,7 +306,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
-       cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
 }
 
@@ -330,8 +317,6 @@ static int worker_thread(void *__cwq)
        if (cwq->wq->freezeable)
                set_freezable();
 
-       set_user_nice(current, -5);
-
        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
@@ -375,29 +360,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
-       int active;
-
-       if (cwq->thread == current) {
-               /*
-                * Probably keventd trying to flush its own queue. So simply run
-                * it by hand rather than deadlocking.
-                */
-               run_workqueue(cwq);
-               active = 1;
-       } else {
-               struct wq_barrier barr;
+       int active = 0;
+       struct wq_barrier barr;
 
-               active = 0;
-               spin_lock_irq(&cwq->lock);
-               if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-                       insert_wq_barrier(cwq, &barr, &cwq->worklist);
-                       active = 1;
-               }
-               spin_unlock_irq(&cwq->lock);
+       WARN_ON(cwq->thread == current);
 
-               if (active)
-                       wait_for_completion(&barr.done);
+       spin_lock_irq(&cwq->lock);
+       if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+               insert_wq_barrier(cwq, &barr, &cwq->worklist);
+               active = 1;
        }
+       spin_unlock_irq(&cwq->lock);
+
+       if (active)
+               wait_for_completion(&barr.done);
 
        return active;
 }
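
The rewritten flush_cpu_workqueue() above no longer special-cases keventd flushing its own queue by calling run_workqueue() recursively; it always inserts a barrier work item and sleeps on its completion, which is also why the run_depth recursion counter could be deleted earlier in this diff. For reference, the barrier machinery it leans on (already present earlier in workqueue.c, shown here only as an approximate sketch) works like this:

    struct wq_barrier {
            struct work_struct      work;
            struct completion       done;
    };

    static void wq_barrier_func(struct work_struct *work)
    {
            struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

            complete(&barr->done);
    }

    static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                                  struct wq_barrier *barr, struct list_head *head)
    {
            /* the real code also marks the work pending before inserting it */
            INIT_WORK(&barr->work, wq_barrier_func);
            init_completion(&barr->done);
            insert_work(cwq, &barr->work, head);
    }

So flush_cpu_workqueue() blocks in wait_for_completion() until every item queued ahead of the barrier, plus the barrier itself, has run.
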
@@ -622,7 +598,12 @@ static struct workqueue_struct *keventd_wq __read_mostly;
  * schedule_work - put work task in global workqueue
  * @work: job to be done
  *
- * This puts a job in the kernel-global workqueue.
+ * Returns zero if @work was already on the kernel-global workqueue and
+ * non-zero otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
  */
 int schedule_work(struct work_struct *work)
 {
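
As a usage note for the clarified kernel-doc above: schedule_work() keys off the work item's pending bit, so callers can requeue opportunistically without double-queuing. A minimal, hypothetical sketch:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    static void my_handler(struct work_struct *work)
    {
            /* deferred processing, runs in keventd context */
    }

    static DECLARE_WORK(my_work, my_handler);

    static void kick_deferred_processing(void)
    {
            /*
             * Non-zero return: my_work was newly queued.
             * Zero return: it was already pending on the kernel-global
             * workqueue and is left where it is.
             */
            if (!schedule_work(&my_work))
                    pr_debug("my_work already pending, not requeued\n");
    }
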
@@ -659,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a delayed_work's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer)) {
+               struct cpu_workqueue_struct *cwq;
+               cwq = wq_per_cpu(keventd_wq, get_cpu());
+               __queue_work(cwq, &dwork->work);
+               put_cpu();
+       }
+       flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
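
The new helper above is aimed at teardown and suspend paths, where a driver wants any armed delayed work to execute now rather than merely cancelling its timer. A hypothetical call pattern:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static void led_off_fn(struct work_struct *work)
    {
            /* turn the LED back off after a blink */
    }

    static DECLARE_DELAYED_WORK(led_off_work, led_off_fn);

    static void blink_led(void)
    {
            /* LED on ... then schedule switching it off in 200 ms */
            schedule_delayed_work(&led_off_work, msecs_to_jiffies(200));
    }

    static void my_shutdown(void)
    {
            /*
             * Don't wait out the timeout: del_timer_sync() inside
             * flush_delayed_work() pulls the work forward, and
             * flush_work() blocks until the callback has finished.
             */
            flush_delayed_work(&led_off_work);
    }
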
@@ -686,21 +685,38 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
+       int orig = -1;
        struct work_struct *works;
 
        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;
 
+       /*
+        * when running in keventd don't schedule a work item on itself.
+        * Can just call directly because the work queue is already bound.
+        * This also is faster.
+        * Make this a generic parameter for other workqueues?
+        */
+       if (current_is_keventd()) {
+               orig = raw_smp_processor_id();
+               INIT_WORK(per_cpu_ptr(works, orig), func);
+               func(per_cpu_ptr(works, orig));
+       }
+
        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
+               if (cpu == orig)
+                       continue;
                INIT_WORK(work, func);
                schedule_work_on(cpu, work);
        }
-       for_each_online_cpu(cpu)
-               flush_work(per_cpu_ptr(works, cpu));
+       for_each_online_cpu(cpu) {
+               if (cpu != orig)
+                       flush_work(per_cpu_ptr(works, cpu));
+       }
        put_online_cpus();
        free_percpu(works);
        return 0;
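
schedule_on_each_cpu() runs func once on every online CPU via keventd and sleeps until all of them have finished. The change above short-circuits the calling CPU when the caller is keventd itself, so keventd never blocks in flush_work() waiting on a work item sitting in its own queue. The calling convention is unchanged; a hypothetical user:

    static void drain_local_caches(struct work_struct *unused)
    {
            /* runs once on each online CPU, in keventd context */
    }

    static int drain_all_caches(void)
    {
            /*
             * Returns 0 on success or -ENOMEM if the per-cpu work
             * items could not be allocated; sleeps until done.
             */
            return schedule_on_each_cpu(drain_local_caches);
    }
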
@@ -772,8 +788,6 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
        return cwq;
 }
 
-DEFINE_TRACE(workqueue_creation);
-
 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -879,8 +893,6 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
-DEFINE_TRACE(workqueue_destruction);
-
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
        /*
@@ -985,20 +997,20 @@ undo:
 }
 
 #ifdef CONFIG_SMP
-static struct workqueue_struct *work_on_cpu_wq __read_mostly;
 
 struct work_for_cpu {
-       struct work_struct work;
+       struct completion completion;
        long (*fn)(void *);
        void *arg;
        long ret;
 };
 
-static void do_work_for_cpu(struct work_struct *w)
+static int do_work_for_cpu(void *_wfc)
 {
-       struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
-
+       struct work_for_cpu *wfc = _wfc;
        wfc->ret = wfc->fn(wfc->arg);
+       complete(&wfc->completion);
+       return 0;
 }
 
 /**
@@ -1009,17 +1021,23 @@ static void do_work_for_cpu(struct work_struct *w)
  *
  * This will return the value @fn returns.
  * It is up to the caller to ensure that the cpu doesn't go offline.
+ * The caller must not hold any locks which would prevent @fn from completing.
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-       struct work_for_cpu wfc;
-
-       INIT_WORK(&wfc.work, do_work_for_cpu);
-       wfc.fn = fn;
-       wfc.arg = arg;
-       queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
-       flush_work(&wfc.work);
-
+       struct task_struct *sub_thread;
+       struct work_for_cpu wfc = {
+               .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
+               .fn = fn,
+               .arg = arg,
+       };
+
+       sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
+       if (IS_ERR(sub_thread))
+               return PTR_ERR(sub_thread);
+       kthread_bind(sub_thread, cpu);
+       wake_up_process(sub_thread);
+       wait_for_completion(&wfc.completion);
        return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
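
work_on_cpu() now creates a short-lived kernel thread, binds it to the requested CPU, and waits for it on an on-stack completion, which is why the dedicated work_on_cpu_wq workqueue is dropped in the final hunk below. The caller-side rules are the ones in the kernel-doc: keep the CPU online and don't hold locks that @fn or the kthread machinery might need. A hypothetical caller:

    static long read_cpu_state(void *arg)
    {
            /* runs on the CPU passed to work_on_cpu(); arg is our cookie */
            return query_hw_state(arg);     /* hypothetical helper */
    }

    static long read_state_on(unsigned int cpu, void *cookie)
    {
            long ret;

            get_online_cpus();              /* keep @cpu from going offline */
            ret = work_on_cpu(cpu, read_cpu_state, cookie);
            put_online_cpus();
            return ret;
    }
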
@@ -1035,8 +1053,4 @@ void __init init_workqueues(void)
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
-#ifdef CONFIG_SMP
-       work_on_cpu_wq = create_workqueue("work_on_cpu");
-       BUG_ON(!work_on_cpu_wq);
-#endif
 }