diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 00c7688..724efc6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -47,6 +47,7 @@ struct dm_io {
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
+       spinlock_t endio_lock;
 };
 
 /*
@@ -130,7 +131,7 @@ struct mapped_device {
        /*
         * A list of ios that arrived while we were suspended.
         */
-       atomic_t pending;
+       atomic_t pending[2];
        wait_queue_head_t wait;
        struct work_struct work;
        struct bio_list deferred;
@@ -453,13 +454,14 @@ static void start_io_acct(struct dm_io *io)
 {
        struct mapped_device *md = io->md;
        int cpu;
+       int rw = bio_data_dir(io->bio);
 
        io->start_time = jiffies;
 
        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_unlock();
-       dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+       dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -479,8 +481,9 @@ static void end_io_acct(struct dm_io *io)
         * After this is decremented the bio must not be touched if it is
         * a barrier.
         */
-       dm_disk(md)->part0.in_flight = pending =
-               atomic_dec_return(&md->pending);
+       dm_disk(md)->part0.in_flight[rw] = pending =
+               atomic_dec_return(&md->pending[rw]);
+       pending += atomic_read(&md->pending[rw^0x1]);
 
        /* nudge anyone waiting on suspend queue */
        if (!pending)
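
The single pending counter becomes pending[2] because the block layer now keeps part0.in_flight as separate read and write counts, so end_io_acct() has to sum both directions before deciding whether a waiter on the suspend queue can be woken. A minimal sketch of that accounting pattern, written as if it lived inside dm.c; the helper names (md_io_start, md_io_done, md_idle) are invented for illustration and are not dm.c functions.

/*
 * Illustrative only: per-direction in-flight accounting for a mapped_device.
 */
static void md_io_start(struct mapped_device *md, struct bio *bio)
{
        int rw = bio_data_dir(bio);     /* READ == 0, WRITE == 1 */

        atomic_inc(&md->pending[rw]);
}

static int md_io_done(struct mapped_device *md, struct bio *bio)
{
        int rw = bio_data_dir(bio);
        int pending = atomic_dec_return(&md->pending[rw]);

        /* a suspend waiter must see *both* directions drained */
        return pending + atomic_read(&md->pending[rw ^ 1]);
}

static bool md_idle(struct mapped_device *md)
{
        return !atomic_read(&md->pending[0]) && !atomic_read(&md->pending[1]);
}
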
@@ -512,12 +515,13 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
 struct dm_table *dm_get_table(struct mapped_device *md)
 {
        struct dm_table *t;
+       unsigned long flags;
 
-       read_lock(&md->map_lock);
+       read_lock_irqsave(&md->map_lock, flags);
        t = md->map;
        if (t)
                dm_table_get(t);
-       read_unlock(&md->map_lock);
+       read_unlock_irqrestore(&md->map_lock, flags);
 
        return t;
 }
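
The plain read_lock()/write_lock() calls on md->map_lock change to the _irqsave variants because, with request-based targets, the live table can be looked up from contexts that run with interrupts disabled; once a lock can be taken there, every acquisition must disable interrupts, or an interrupt arriving while the lock is held in process context can deadlock on it. A stripped-down sketch of that discipline; table_lock, current_table, grab_table and install_table are illustrative names, not dm.c symbols.

/*
 * Illustrative only: a lock shared by process context and contexts that
 * run with interrupts off must always be taken with the irqsave variants.
 */
static DEFINE_RWLOCK(table_lock);
static struct dm_table *current_table;

static struct dm_table *grab_table(void)
{
        struct dm_table *t;
        unsigned long flags;

        read_lock_irqsave(&table_lock, flags);
        t = current_table;
        if (t)
                dm_table_get(t);
        read_unlock_irqrestore(&table_lock, flags);

        return t;
}

static void install_table(struct dm_table *t)
{
        unsigned long flags;

        write_lock_irqsave(&table_lock, flags);
        current_table = t;
        write_unlock_irqrestore(&table_lock, flags);
}
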
@@ -575,8 +579,12 @@ static void dec_pending(struct dm_io *io, int error)
        struct mapped_device *md = io->md;
 
        /* Push-back supersedes any I/O errors */
-       if (error && !(io->error > 0 && __noflush_suspending(md)))
-               io->error = error;
+       if (unlikely(error)) {
+               spin_lock_irqsave(&io->endio_lock, flags);
+               if (!(io->error > 0 && __noflush_suspending(md)))
+                       io->error = error;
+               spin_unlock_irqrestore(&io->endio_lock, flags);
+       }
 
        if (atomic_dec_and_test(&io->io_count)) {
                if (io->error == DM_ENDIO_REQUEUE) {
@@ -585,7 +593,7 @@ static void dec_pending(struct dm_io *io, int error)
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md)) {
-                               if (!bio_barrier(io->bio))
+                               if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
                                        bio_list_add_head(&md->deferred,
                                                          io->bio);
                        } else
@@ -597,7 +605,7 @@ static void dec_pending(struct dm_io *io, int error)
                io_error = io->error;
                bio = io->bio;
 
-               if (bio_barrier(bio)) {
+               if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
                        /*
                         * There can be just one barrier request so we use
                         * a per-device variable for error reporting.
@@ -737,16 +745,22 @@ static void rq_completed(struct mapped_device *md, int run_queue)
        dm_put(md);
 }
 
+static void free_rq_clone(struct request *clone)
+{
+       struct dm_rq_target_io *tio = clone->end_io_data;
+
+       blk_rq_unprep_clone(clone);
+       free_rq_tio(tio);
+}
+
 static void dm_unprep_request(struct request *rq)
 {
        struct request *clone = rq->special;
-       struct dm_rq_target_io *tio = clone->end_io_data;
 
        rq->special = NULL;
        rq->cmd_flags &= ~REQ_DONTPREP;
 
-       blk_rq_unprep_clone(clone);
-       free_rq_tio(tio);
+       free_rq_clone(clone);
 }
 
 /*
@@ -824,8 +838,7 @@ static void dm_end_request(struct request *clone, int error)
                        rq->sense_len = clone->sense_len;
        }
 
-       BUG_ON(clone->bio);
-       free_rq_tio(tio);
+       free_rq_clone(clone);
 
        blk_end_request_all(rq, error);
 
@@ -1016,7 +1029,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
        clone->bi_flags |= 1 << BIO_CLONED;
 
        if (bio_integrity(bio)) {
-               bio_integrity_clone(clone, bio, GFP_NOIO);
+               bio_integrity_clone(clone, bio, GFP_NOIO, bs);
                bio_integrity_trim(clone,
                                   bio_sector_offset(bio, idx, offset), len);
        }
@@ -1044,7 +1057,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
        clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
        if (bio_integrity(bio)) {
-               bio_integrity_clone(clone, bio, GFP_NOIO);
+               bio_integrity_clone(clone, bio, GFP_NOIO, bs);
 
                if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
                        bio_integrity_trim(clone,
@@ -1203,7 +1216,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
        ci.map = dm_get_table(md);
        if (unlikely(!ci.map)) {
-               if (!bio_barrier(bio))
+               if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
                        bio_io_error(bio);
                else
                        if (!md->barrier_error)
@@ -1218,6 +1231,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
        atomic_set(&ci.io->io_count, 1);
        ci.io->bio = bio;
        ci.io->md = md;
+       spin_lock_init(&ci.io->endio_lock);
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
        if (unlikely(bio_empty_barrier(bio)))
@@ -1315,7 +1329,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
         * we have to queue this io for later.
         */
        if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-           unlikely(bio_barrier(bio))) {
+           unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                up_read(&md->io_lock);
 
                if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1338,7 +1352,7 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct mapped_device *md = q->queuedata;
 
-       if (unlikely(bio_barrier(bio))) {
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }
@@ -1708,7 +1722,7 @@ out:
        return r;
 }
 
-static struct block_device_operations dm_blk_dops;
+static const struct block_device_operations dm_blk_dops;
 
 static void dm_wq_work(struct work_struct *work);
 
@@ -1779,7 +1793,8 @@ static struct mapped_device *alloc_dev(int minor)
        if (!md->disk)
                goto bad_disk;
 
-       atomic_set(&md->pending, 0);
+       atomic_set(&md->pending[0], 0);
+       atomic_set(&md->pending[1], 0);
        init_waitqueue_head(&md->wait);
        INIT_WORK(&md->work, dm_wq_work);
        init_waitqueue_head(&md->eventq);
@@ -1813,6 +1828,7 @@ static struct mapped_device *alloc_dev(int minor)
 bad_bdev:
        destroy_workqueue(md->wq);
 bad_thread:
+       del_gendisk(md->disk);
        put_disk(md->disk);
 bad_disk:
        blk_cleanup_queue(md->queue);
@@ -1910,6 +1926,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 {
        struct request_queue *q = md->queue;
        sector_t size;
+       unsigned long flags;
 
        size = dm_table_get_size(t);
 
@@ -1940,10 +1957,10 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 
        __bind_mempools(md, t);
 
-       write_lock(&md->map_lock);
+       write_lock_irqsave(&md->map_lock, flags);
        md->map = t;
        dm_table_set_restrictions(t, q, limits);
-       write_unlock(&md->map_lock);
+       write_unlock_irqrestore(&md->map_lock, flags);
 
        return 0;
 }
@@ -1951,14 +1968,15 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 static void __unbind(struct mapped_device *md)
 {
        struct dm_table *map = md->map;
+       unsigned long flags;
 
        if (!map)
                return;
 
        dm_table_event_callback(map, NULL, NULL);
-       write_lock(&md->map_lock);
+       write_lock_irqsave(&md->map_lock, flags);
        md->map = NULL;
-       write_unlock(&md->map_lock);
+       write_unlock_irqrestore(&md->map_lock, flags);
        dm_table_destroy(map);
 }
 
@@ -2080,7 +2098,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
                                break;
                        }
                        spin_unlock_irqrestore(q->queue_lock, flags);
-               } else if (!atomic_read(&md->pending))
+               } else if (!atomic_read(&md->pending[0]) &&
+                                       !atomic_read(&md->pending[1]))
                        break;
 
                if (interruptible == TASK_INTERRUPTIBLE &&
@@ -2156,7 +2175,7 @@ static void dm_wq_work(struct work_struct *work)
                if (dm_request_based(md))
                        generic_make_request(c);
                else {
-                       if (bio_barrier(c))
+                       if (bio_rw_flagged(c, BIO_RW_BARRIER))
                                process_barrier(md, c);
                        else
                                __split_and_process_bio(md, c);
@@ -2200,16 +2219,6 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
                goto out;
        }
 
-       /*
-        * It is enought that blk_queue_ordered() is called only once when
-        * the first bio-based table is bound.
-        *
-        * This setting should be moved to alloc_dev() when request-based dm
-        * supports barrier.
-        */
-       if (!md->map && dm_table_bio_based(table))
-               blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
-
        __unbind(md);
        r = __bind(md, table, &limits);
 
@@ -2661,7 +2670,7 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
        kfree(pools);
 }
 
-static struct block_device_operations dm_blk_dops = {
+static const struct block_device_operations dm_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,
        .ioctl = dm_blk_ioctl,
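
Constifying dm_blk_dops (both the forward declaration and the definition) lets the compiler place the method table in read-only data. The same pattern applied to a hypothetical block driver of this era, with invented foo_* names:

/* Illustrative only: a const method table for a made-up block driver. */
#include <linux/blkdev.h>
#include <linux/module.h>

static int foo_open(struct block_device *bdev, fmode_t mode)
{
        return 0;
}

static int foo_release(struct gendisk *disk, fmode_t mode)
{
        return 0;
}

static const struct block_device_operations foo_fops = {
        .owner   = THIS_MODULE,
        .open    = foo_open,
        .release = foo_release,
};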