dm kcopyd: queue completion callbacks to the kcopyd thread, handle zero-length and page-less jobs, and re-push deferred jobs to the head of the queue
[safe/jmp/linux-2.6] / drivers / md / dm-kcopyd.c
index ee9583b..addf834 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/device-mapper.h>
 #include <linux/dm-kcopyd.h>
 
 #include "dm.h"
@@ -268,6 +269,17 @@ static void push(struct list_head *jobs, struct kcopyd_job *job)
        spin_unlock_irqrestore(&kc->job_lock, flags);
 }
 
+
+static void push_head(struct list_head *jobs, struct kcopyd_job *job)
+{
+       unsigned long flags;
+       struct dm_kcopyd_client *kc = job->kc;
+
+       spin_lock_irqsave(&kc->job_lock, flags);
+       list_add(&job->list, jobs);
+       spin_unlock_irqrestore(&kc->job_lock, flags);
+}
+
 /*
  * These three functions process 1 item from the corresponding
  * job list.
@@ -285,7 +297,8 @@ static int run_complete_job(struct kcopyd_job *job)
        dm_kcopyd_notify_fn fn = job->fn;
        struct dm_kcopyd_client *kc = job->kc;
 
-       kcopyd_put_pages(kc, job->pages);
+       if (job->pages)
+               kcopyd_put_pages(kc, job->pages);
        mempool_free(job, kc->job_pool);
        fn(read_err, write_err, context);
 
@@ -332,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
        int r;
        struct dm_io_request io_req = {
-               .bi_rw = job->rw,
+               .bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = job->pages,
                .mem.offset = job->offset,
@@ -398,7 +411,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
                         * We couldn't service this job ATM, so
                         * push this job back onto the list.
                         */
-                       push(jobs, job);
+                       push_head(jobs, job);
                        break;
                }
 
@@ -437,7 +450,10 @@ static void dispatch_job(struct kcopyd_job *job)
 {
        struct dm_kcopyd_client *kc = job->kc;
        atomic_inc(&kc->nr_jobs);
-       push(&kc->pages_jobs, job);
+       if (unlikely(!job->source.count))
+               push(&kc->complete_jobs, job);
+       else
+               push(&kc->pages_jobs, job);
        wake(kc);
 }
 
@@ -449,6 +465,7 @@ static void segment_complete(int read_err, unsigned long write_err,
        sector_t progress = 0;
        sector_t count = 0;
        struct kcopyd_job *job = (struct kcopyd_job *) context;
+       struct dm_kcopyd_client *kc = job->kc;
 
        mutex_lock(&job->lock);
 
@@ -478,7 +495,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 
        if (count) {
                int i;
-               struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
+               struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
                                                           GFP_NOIO);
 
                *sub_job = *job;
@@ -497,13 +514,16 @@ static void segment_complete(int read_err, unsigned long write_err,
        } else if (atomic_dec_and_test(&job->sub_jobs)) {
 
                /*
-                * To avoid a race we must keep the job around
-                * until after the notify function has completed.
-                * Otherwise the client may try and stop the job
-                * after we've completed.
+                * Queue the completion callback to the kcopyd thread.
+                *
+                * Some callers assume that all the completions are called
+                * from a single thread and don't race with each other.
+                *
+                * We must not call the callback directly here because this
+                * code may not be executing in the thread.
                 */
-               job->fn(read_err, write_err, job->context);
-               mempool_free(job, job->kc->job_pool);
+               push(&kc->complete_jobs, job);
+               wake(kc);
        }
 }
 
@@ -516,6 +536,8 @@ static void split_job(struct kcopyd_job *job)
 {
        int i;
 
+       atomic_inc(&job->kc->nr_jobs);
+
        atomic_set(&job->sub_jobs, SPLIT_COUNT);
        for (i = 0; i < SPLIT_COUNT; i++)
                segment_complete(0, 0u, job);