diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ea1b4e7..67e526b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -598,7 +598,12 @@ static struct workqueue_struct *keventd_wq __read_mostly;
  * schedule_work - put work task in global workqueue
  * @work: job to be done
  *
- * This puts a job in the kernel-global workqueue.
+ * Returns zero if @work was already on the kernel-global workqueue and
+ * non-zero otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
  */
 int schedule_work(struct work_struct *work)
 {
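
The new kernel-doc above spells out schedule_work()'s return convention. As a quick illustration, the following is a hedged sketch (not part of the patch) of a hypothetical caller relying on it; my_handler, my_work and my_kick are invented names.

#include <linux/workqueue.h>
#include <linux/kernel.h>

static void my_handler(struct work_struct *work)
{
	/* runs later, in process context, on the kernel-global workqueue */
}

static DECLARE_WORK(my_work, my_handler);

static void my_kick(void)
{
	/*
	 * schedule_work() returns non-zero only when @work was not already
	 * pending, so calling this repeatedly before my_handler() runs
	 * simply leaves the single queued instance in place.
	 */
	if (!schedule_work(&my_work))
		pr_debug("my_work already pending, not queued again\n");
}
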
@@ -635,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a delayed_work's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer)) {
+               struct cpu_workqueue_struct *cwq;
+               cwq = wq_per_cpu(keventd_wq, get_cpu());
+               __queue_work(cwq, &dwork->work);
+               put_cpu();
+       }
+       flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
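
For context, here is a hedged usage sketch (not part of the patch) of the new flush_delayed_work() helper in a hypothetical driver's shutdown path; my_poll, my_poll_work, my_start and my_remove are invented names.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void my_poll(struct work_struct *work)
{
	/* poll hardware state; runs in process context */
}

static DECLARE_DELAYED_WORK(my_poll_work, my_poll);

static void my_start(void)
{
	/* arm the poll to run roughly one second from now */
	schedule_delayed_work(&my_poll_work, HZ);
}

static void my_remove(void)
{
	/*
	 * Cancel the pending timer, run my_poll() immediately if it was
	 * still queued, and return only once the callback has finished,
	 * so resources it touches can be freed safely afterwards.
	 */
	flush_delayed_work(&my_poll_work);
}
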
@@ -662,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
+       int orig = -1;
        struct work_struct *works;
 
        works = alloc_percpu(struct work_struct);
@@ -669,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
                return -ENOMEM;
 
        get_online_cpus();
+
+       /*
+        * When running in keventd, don't schedule a work item on
+        * keventd itself; call the function directly instead.  The
+        * workqueue is already bound to this CPU, so a direct call
+        * is also faster.
+        */
+       if (current_is_keventd())
+               orig = raw_smp_processor_id();
+
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
                INIT_WORK(work, func);
-               schedule_work_on(cpu, work);
+               if (cpu != orig)
+                       schedule_work_on(cpu, work);
        }
+       if (orig >= 0)
+               func(per_cpu_ptr(works, orig));
+
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
+
        put_online_cpus();
        free_percpu(works);
        return 0;
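
To make the keventd special case concrete, here is a hedged sketch (not part of the patch) of a hypothetical schedule_on_each_cpu() caller; my_counter, my_drain_cpu and my_drain_all are invented names.

#include <linux/workqueue.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void my_drain_cpu(struct work_struct *unused)
{
	/* runs once on each online CPU, bound to that CPU */
	__get_cpu_var(my_counter) = 0;
}

static int my_drain_all(void)
{
	/*
	 * Runs my_drain_cpu() on every online CPU and waits for all of
	 * them to finish; returns 0 on success or -ENOMEM.  With the
	 * change above, when the caller is itself a keventd worker the
	 * local CPU's invocation becomes a direct call rather than a
	 * work item that keventd would queue to itself and then wait on.
	 */
	return schedule_on_each_cpu(my_drain_cpu);
}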