Revert "swsusp special saveable pages support" commits
[safe/jmp/linux-2.6] / kernel / workqueue.c
index 42df83d..565cf7a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
+#include <linux/hardirq.h>
 
 /*
- * The per-CPU workqueue (if single thread, we always use cpu 0's).
+ * The per-CPU workqueue (if single thread, we always use the first
+ * possible cpu).
  *
  * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
@@ -69,6 +71,8 @@ struct workqueue_struct {
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
+static int singlethread_cpu;
+
 /* If it's single threaded, it isn't in the list of workqueues. */
 static inline int is_single_threaded(struct workqueue_struct *wq)
 {
@@ -102,7 +106,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 
        if (!test_and_set_bit(0, &work->pending)) {
                if (unlikely(is_single_threaded(wq)))
-                       cpu = 0;
+                       cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
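
A minimal usage sketch (hypothetical driver code, using this era's three-argument DECLARE_WORK/INIT_WORK): work queued on a single-threaded workqueue always lands on singlethread_cpu's per-CPU area, no matter which CPU calls queue_work().

#include <linux/init.h>
#include <linux/workqueue.h>

static void my_handler(void *data)	/* hypothetical handler */
{
	/* runs with process context in the workqueue thread */
}

static DECLARE_WORK(my_work, my_handler, NULL);

static int __init my_init(void)
{
	struct workqueue_struct *wq;

	wq = create_singlethread_workqueue("mywq");
	if (!wq)
		return -ENOMEM;
	queue_work(wq, &my_work);	/* always runs on singlethread_cpu */
	return 0;
}
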
@@ -118,7 +122,7 @@ static void delayed_work_timer_fn(unsigned long __data)
        int cpu = smp_processor_id();
 
        if (unlikely(is_single_threaded(wq)))
-               cpu = 0;
+               cpu = singlethread_cpu;
 
        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
@@ -144,7 +148,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
        return ret;
 }
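
The delayed path relies on the same override: delayed_work_timer_fn() fires on whichever CPU the timer lands on, so forcing cpu = singlethread_cpu there is what keeps single-threaded queues on one CPU. A delayed-queueing sketch (hypothetical names):

static void my_poll(void *data)		/* hypothetical */
{
	/* periodic work */
}

static DECLARE_WORK(my_poll_work, my_poll, NULL);

static void start_polling(struct workqueue_struct *wq)
{
	/* run my_poll() roughly one second from now */
	queue_delayed_work(wq, &my_poll_work, HZ);
}
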
 
-static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
+static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
        unsigned long flags;
 
@@ -266,8 +270,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
        might_sleep();
 
        if (is_single_threaded(wq)) {
-               /* Always use cpu 0's area. */
-               flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0));
+               /* Always use first cpu's area. */
+               flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;
 
@@ -315,12 +319,17 @@ struct workqueue_struct *__create_workqueue(const char *name,
                return NULL;
 
        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
+       if (!wq->cpu_wq) {
+               kfree(wq);
+               return NULL;
+       }
+
        wq->name = name;
        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
-               p = create_workqueue_thread(wq, 0);
+               p = create_workqueue_thread(wq, singlethread_cpu);
                if (!p)
                        destroy = 1;
                else
@@ -374,7 +383,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (is_single_threaded(wq))
-               cleanup_workqueue_thread(wq, 0);
+               cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
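
A caller-side sketch (hypothetical) of the create/destroy pairing; with the alloc_percpu() check added above, __create_workqueue() also returns NULL when the per-CPU area cannot be allocated, so callers must check the result:

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical */

static int __init my_setup(void)
{
	my_wq = create_workqueue("my_events");	/* one thread per CPU */
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_teardown(void)
{
	destroy_workqueue(my_wq);	/* stops the per-CPU threads */
}
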
@@ -419,6 +428,37 @@ int schedule_delayed_work_on(int cpu,
        return ret;
 }
 
+/**
+ * schedule_on_each_cpu - call a function on each online CPU from keventd
+ * @func: the function to call
+ * @info: a pointer to pass to func()
+ *
+ * Returns zero on success.
+ * Returns a negative errno on failure.
+ *
+ * Appears to be racy against CPU hotplug.
+ *
+ * schedule_on_each_cpu() is very slow.
+ */
+int schedule_on_each_cpu(void (*func)(void *info), void *info)
+{
+       int cpu;
+       struct work_struct *works;
+
+       works = alloc_percpu(struct work_struct);
+       if (!works)
+               return -ENOMEM;
+
+       for_each_online_cpu(cpu) {
+               INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+               __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
+                               per_cpu_ptr(works, cpu));
+       }
+       flush_workqueue(keventd_wq);
+       free_percpu(works);
+       return 0;
+}
+
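
A usage sketch (hypothetical names): run a function once on every online CPU and wait for all of them to finish, e.g. to drain per-CPU caches. It allocates a work_struct per CPU and then flushes keventd, which is why the comment above warns that it is very slow.

#include <linux/workqueue.h>

static void drain_local_pool(void *info)	/* hypothetical */
{
	/* runs in keventd context, once on each online CPU */
}

static int drain_all_pools(void)
{
	return schedule_on_each_cpu(drain_local_pool, NULL);	/* 0 or -ENOMEM */
}
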
 void flush_scheduled_work(void)
 {
        flush_workqueue(keventd_wq);
@@ -449,6 +489,34 @@ void cancel_rearming_delayed_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
+/**
+ * execute_in_process_context - reliably execute the routine with user context
+ * @fn:                the function to execute
+ * @data:      data to pass to the function
+ * @ew:                guaranteed storage for the execute work structure (must
+ *             be available when the work executes)
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns:    0 - function was executed
+ *             1 - function was scheduled for execution
+ */
+int execute_in_process_context(void (*fn)(void *data), void *data,
+                              struct execute_work *ew)
+{
+       if (!in_interrupt()) {
+               fn(data);
+               return 0;
+       }
+
+       INIT_WORK(&ew->work, fn, data);
+       schedule_work(&ew->work);
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(execute_in_process_context);
+
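
A sketch of the pattern this helper enables (hypothetical object): a release path that may be entered from interrupt context embeds the execute_work storage in the object itself, which keeps that storage alive until the deferred function runs and frees it.

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {				/* hypothetical */
	struct execute_work ew;
	/* ... payload ... */
};

static void my_obj_release(void *data)
{
	kfree(data);			/* safe: process context here */
}

static void my_obj_put(struct my_obj *obj)
{
	/* runs the release now if possible, else defers it to keventd */
	execute_in_process_context(my_obj_release, obj, &obj->ew);
}
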
 int keventd_up(void)
 {
        return keventd_wq != NULL;
@@ -475,11 +543,11 @@ int current_is_keventd(void)
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-       LIST_HEAD(list);
+       struct list_head list;
        struct work_struct *work;
 
        spin_lock_irq(&cwq->lock);
-       list_splice_init(&cwq->worklist, &list);
+       list_replace_init(&cwq->worklist, &list);
 
        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
@@ -491,7 +559,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 }
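
Switching from list_splice_init() to list_replace_init() also lets 'list' be a bare, uninitialized struct list_head, because the destination head is overwritten wholesale rather than spliced into. A sketch of the semantics (an illustration of what list_replace_init() in include/linux/list.h does, not the canonical source):

#include <linux/list.h>

static inline void list_replace_init_sketch(struct list_head *old,
					    struct list_head *new)
{
	new->next = old->next;		/* new head adopts old's elements */
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
	INIT_LIST_HEAD(old);		/* old becomes an empty list again */
}
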
 
 /* We're holding the cpucontrol mutex here */
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+static int workqueue_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
 {
@@ -522,6 +590,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
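+                       /* Skip workqueues whose thread creation failed in
+                        * CPU_UP_PREPARE: there is nothing to unbind. */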
+                       if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+                               continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
@@ -543,6 +613,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 void init_workqueues(void)
 {
+       singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
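
On the initialization above: per-CPU data is only set up for CPUs in cpu_possible_map, which is not guaranteed to contain CPU 0 on every configuration, hence first_cpu() rather than a literal 0. Roughly, first_cpu() reduces to a find-first-bit over the mask (a simplified sketch of the cpumask macros, shown for illustration only):

#include <linux/cpumask.h>

static inline int first_cpu_sketch(const cpumask_t *mask)
{
	/* lowest-numbered CPU present in the mask */
	return find_first_bit(mask->bits, NR_CPUS);
}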