diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9a5beb4..c2cb7b8 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -68,7 +68,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 
        /* allocate a r10bio with room for raid_disks entries in the bios array */
        r10_bio = kzalloc(size, gfp_flags);
-       if (!r10_bio)
+       if (!r10_bio && conf->mddev)
                unplug_slaves(conf->mddev);
 
        return r10_bio;
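This NULL check pairs with a change later in this same patch: run() now assigns
conf->mddev only after the r10bio mempool has been created, so
r10bio_pool_alloc() can be invoked from mempool_create() while conf->mddev is
still NULL. A minimal sketch of the resulting ordering in run():

	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
					   r10bio_pool_free, conf);
	/* the allocator can already run here, with conf->mddev == NULL */
	...
	conf->mddev = mddev;	/* now assigned only after the pool exists */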
@@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
        mddev_t *mddev = q->queuedata;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
-       unsigned int chunk_sectors = mddev->chunk_size >> 9;
+       unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bvm->bi_size >> 9;
 
        max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
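The merge limit relies on chunk_sectors being a power of two: sector &
(chunk_sectors - 1) is then the offset into the current chunk, and the
expression yields how many bytes fit before the chunk boundary. Worked through,
assuming a 64 KiB chunk (chunk_sectors = 128):

	/* sector = 200, bio_sectors = 8:
	 *   200 & 127      = 72     sectors into the chunk
	 *   128 - (72 + 8) = 48     sectors left before the boundary
	 *   48 << 9        = 24576  bytes this bvec may still add */

The power-of-two requirement is enforced by the new check in run() further
down.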
@@ -631,6 +631,8 @@ static int raid10_congested(void *data, int bits)
        conf_t *conf = mddev->private;
        int i, ret = 0;
 
+       if (mddev_congested(mddev, bits))
+               return 1;
        rcu_read_lock();
        for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
@@ -796,12 +798,12 @@ static int make_request(struct request_queue *q, struct bio * bio)
        int i;
        int chunk_sects = conf->chunk_mask + 1;
        const int rw = bio_data_dir(bio);
-       const int do_sync = bio_sync(bio);
+       const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
        struct bio_list bl;
        unsigned long flags;
        mdk_rdev_t *blocked_rdev;
 
-       if (unlikely(bio_barrier(bio))) {
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }
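bio_sync() and bio_barrier() were folded into the generic bio_rw_flagged()
test when the block layer consolidated its bio flags (2.6.32 era). Roughly, as
it appeared in that era's headers (a sketch from memory, not a verbatim quote):

	static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
	{
		return (bio->bi_rw & (1 << flag)) != 0;
	}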
@@ -882,7 +884,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
                        mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
-               read_bio->bi_rw = READ | do_sync;
+               read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
                read_bio->bi_private = r10_bio;
 
                generic_make_request(read_bio);
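The shift matters: the old bio_sync() returned the flag already in bit
position, so a plain OR worked, but do_sync is now a bool (0 or 1). READ |
do_sync would set the low bit of bi_rw (the data-direction bit) instead of the
sync bit, so the bool must be shifted back to BIO_RW_SYNCIO:

	/* do_sync == true:  (1 << BIO_RW_SYNCIO) sets the SYNCIO bit
	 * do_sync == false: (0 << BIO_RW_SYNCIO) == 0, bi_rw stays plain READ */

The same idiom recurs in the write path and in raid10d() below.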
@@ -950,7 +952,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
                        conf->mirrors[d].rdev->data_offset;
                mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | do_sync;
+               mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
                mbio->bi_private = r10_bio;
 
                atomic_inc(&r10_bio->remaining);
@@ -985,7 +987,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
        int i;
 
        if (conf->near_copies < conf->raid_disks)
-               seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
+               seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
        if (conf->near_copies > 1)
                seq_printf(seq, " %d near-copies", conf->near_copies);
        if (conf->far_copies > 1) {
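chunk_sectors counts 512-byte sectors, so dividing by two converts to KiB,
matching the old byte-based chunk_size / 1024. For example, with a 64 KiB
chunk:

	/* chunk_size = 65536, chunk_sectors = 128:
	 *   old: 65536 / 1024 = 64   ->  " 64K chunks"
	 *   new:   128 / 2    = 64   ->  " 64K chunks" */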
@@ -1151,8 +1153,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
        for ( ; mirror <= last ; mirror++)
                if ( !(p=conf->mirrors+mirror)->rdev) {
 
-                       blk_queue_stack_limits(mddev->queue,
-                                              rdev->bdev->bd_disk->queue);
+                       disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                         rdev->data_offset << 9);
                        /* as we don't honour merge_bvec_fn, we must never risk
                         * violating it, so limit ->max_sector to one PAGE, as
                         * a one page request is never in violation.
@@ -1170,6 +1172,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                        break;
                }
 
+       md_integrity_add_rdev(rdev, mddev);
        print_conf(conf);
        return err;
 }
@@ -1203,7 +1206,9 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
                        /* lost the race, try later */
                        err = -EBUSY;
                        p->rdev = rdev;
+                       goto abort;
                }
+               md_integrity_register(mddev);
        }
 abort:
 
@@ -1607,7 +1612,7 @@ static void raid10d(mddev_t *mddev)
                                raid_end_bio_io(r10_bio);
                                bio_put(bio);
                        } else {
-                               const int do_sync = bio_sync(r10_bio->master_bio);
+                               const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
                                bio_put(bio);
                                rdev = conf->mirrors[mirror].rdev;
                                if (printk_ratelimit())
@@ -1620,13 +1625,14 @@ static void raid10d(mddev_t *mddev)
                                bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
                                        + rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
-                               bio->bi_rw = READ | do_sync;
+                               bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = raid10_end_read_request;
                                unplug = 1;
                                generic_make_request(bio);
                        }
                }
+               cond_resched();
        }
        if (unplug)
                unplug_slaves(mddev);
@@ -1770,7 +1776,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
        max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
        if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                /* recovery... the complicated one */
-               int i, j, k;
+               int j, k;
                r10_bio = NULL;
 
                for (i=0 ; i<conf->raid_disks; i++)
@@ -2044,15 +2050,16 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 static int run(mddev_t *mddev)
 {
        conf_t *conf;
-       int i, disk_idx;
+       int i, disk_idx, chunk_size;
        mirror_info_t *disk;
        mdk_rdev_t *rdev;
        int nc, fc, fo;
        sector_t stride, size;
 
-       if (mddev->chunk_size < PAGE_SIZE) {
+       if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
+           !is_power_of_2(mddev->chunk_sectors)) {
                printk(KERN_ERR "md/raid10: chunk size must be "
-                      "at least PAGE_SIZE(%ld).\n", PAGE_SIZE);
+                      "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
                return -EINVAL;
        }
 
@@ -2089,14 +2096,13 @@ static int run(mddev_t *mddev)
        if (!conf->tmppage)
                goto out_free_conf;
 
-       conf->mddev = mddev;
        conf->raid_disks = mddev->raid_disks;
        conf->near_copies = nc;
        conf->far_copies = fc;
        conf->copies = nc*fc;
        conf->far_offset = fo;
-       conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
-       conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
+       conf->chunk_mask = mddev->chunk_sectors - 1;
+       conf->chunk_shift = ffz(~mddev->chunk_sectors);
        size = mddev->dev_sectors >> conf->chunk_shift;
        sector_div(size, fc);
        size = size * conf->raid_disks;
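ffz(~x) finds the lowest zero bit of ~x, i.e. the lowest set bit of x, so for
a power-of-two chunk it is log2(chunk_sectors); the old code derived the same
shift from the byte-based chunk_size and subtracted 9 (log2 of the sector
size). Worked through for a 64 KiB chunk:

	/* chunk_sectors = 128:
	 *   chunk_mask  = 128 - 1   = 127  (0x7f)
	 *   chunk_shift = ffz(~128) = 7    (1 << 7 == 128)
	 * old equivalent: ffz(~65536) - 9 = 16 - 9 = 7 */

Both the mask and the shift are meaningless for a non-power-of-two chunk,
which is what the new check in run() rules out.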
@@ -2126,9 +2132,18 @@ static int run(mddev_t *mddev)
                goto out_free_conf;
        }
 
+       conf->mddev = mddev;
        spin_lock_init(&conf->device_lock);
        mddev->queue->queue_lock = &conf->device_lock;
 
+       chunk_size = mddev->chunk_sectors << 9;
+       blk_queue_io_min(mddev->queue, chunk_size);
+       if (conf->raid_disks % conf->near_copies)
+               blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+       else
+               blk_queue_io_opt(mddev->queue, chunk_size *
+                                (conf->raid_disks / conf->near_copies));
+
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                disk_idx = rdev->raid_disk;
                if (disk_idx >= mddev->raid_disks
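blk_queue_io_min()/blk_queue_io_opt() publish the I/O topology hints
introduced with the 2.6.31 block layer: the minimum efficient I/O is one
chunk, the optimal I/O one full stripe. When near_copies divides raid_disks
evenly, a stripe covers raid_disks / near_copies distinct chunks; otherwise
the layout only repeats after raid_disks chunks. For example, with 4 disks,
near_copies = 2 and 64 KiB chunks:

	/* chunk_size = 65536, raid_disks = 4, near_copies = 2:
	 *   io_min = 65536                       (one chunk)
	 *   4 % 2 == 0, so
	 *   io_opt = 65536 * (4 / 2) = 131072    (one full stripe) */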
@@ -2137,9 +2152,8 @@ static int run(mddev_t *mddev)
                disk = conf->mirrors + disk_idx;
 
                disk->rdev = rdev;
-
-               blk_queue_stack_limits(mddev->queue,
-                                      rdev->bdev->bd_disk->queue);
+               disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                 rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
                 * violating it, so limit ->max_sector to one PAGE, as
                 * a one page request is never in violation.
@@ -2177,7 +2191,7 @@ static int run(mddev_t *mddev)
        }
 
 
-       mddev->thread = md_register_thread(raid10d, mddev, "%s_raid10");
+       mddev->thread = md_register_thread(raid10d, mddev, NULL);
        if (!mddev->thread) {
                printk(KERN_ERR
                       "raid10: couldn't allocate thread for %s\n",
@@ -2185,6 +2199,10 @@ static int run(mddev_t *mddev)
                goto out_free_conf;
        }
 
+       if (mddev->recovery_cp != MaxSector)
+               printk(KERN_NOTICE "raid10: %s is not clean"
+                      " -- starting background reconstruction\n",
+                      mdname(mddev));
        printk(KERN_INFO
                "raid10: raid set %s active with %d out of %d devices\n",
                mdname(mddev), mddev->raid_disks - mddev->degraded,
@@ -2204,7 +2222,8 @@ static int run(mddev_t *mddev)
         * maybe...
         */
        {
-               int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE);
+               int stripe = conf->raid_disks *
+                       ((mddev->chunk_sectors << 9) / PAGE_SIZE);
                stripe /= conf->near_copies;
                if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2* stripe;
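The readahead heuristic converts one stripe to pages and asks for at least two
stripes of readahead. Continuing the example above (4 disks, near_copies = 2,
64 KiB chunks), assuming 4 KiB pages:

	/* stripe   = 4 * (65536 / 4096) / 2 = 32 pages
	 * ra_pages is raised to >= 2 * 32   = 64 pages (256 KiB) */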
@@ -2212,6 +2231,7 @@ static int run(mddev_t *mddev)
 
        if (conf->near_copies < mddev->raid_disks)
                blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
+       md_integrity_register(mddev);
        return 0;
 
 out_free_conf: