md: Add support for Raid0->Raid5 takeover
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 3603ffa..9db8ee0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
 */
 
+#include <linux/blkdev.h>
+#include <linux/raid/md_u.h>
+#include <linux/seq_file.h>
+#include "md.h"
 #include "linear.h"
 
 /*
  * find which device holds a particular offset
  */
 static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
 {
-       dev_info_t *hash;
-       linear_conf_t *conf = mddev_to_conf(mddev);
-       sector_t idx = sector >> conf->sector_shift;
+       int lo, mid, hi;
+       linear_conf_t *conf;
+
+       lo = 0;
+       hi = mddev->raid_disks - 1;
+       conf = rcu_dereference(mddev->private);
 
        /*
-        * sector_div(a,b) returns the remainer and sets a to a/b
+        * Binary search: find the first device whose
+        * end_sector is past 'sector'
         */
-       (void)sector_div(idx, conf->spacing);
-       hash = conf->hash_table[idx];
 
-       while (sector >= hash->num_sectors + hash->start_sector)
-               hash++;
-       return hash;
+       while (hi > lo) {
+
+               mid = (hi + lo) / 2;
+               if (sector < conf->disks[mid].end_sector)
+                       hi = mid;
+               else
+                       lo = mid + 1;
+       }
+
+       return conf->disks + lo;
 }
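
The lookup above replaces the old hash table with a binary search over per-device cumulative end_sector values. A minimal userspace sketch of the same search, with hypothetical member sizes of 100, 50 and 200 sectors (so end_sector = 100, 150, 350), shows the invariant: the result is the first device whose end_sector lies strictly beyond the target sector.

#include <assert.h>

/* which_dev() reduced to plain arrays; 'end_sector' plays the role
 * of conf->disks[i].end_sector */
static int which_dev_idx(const unsigned long long *end_sector,
                         int raid_disks, unsigned long long sector)
{
        int lo = 0, hi = raid_disks - 1;

        while (hi > lo) {
                int mid = (hi + lo) / 2;
                if (sector < end_sector[mid])
                        hi = mid;      /* target is on mid or before it */
                else
                        lo = mid + 1;  /* target lies past mid */
        }
        return lo;
}

int main(void)
{
        unsigned long long end[] = { 100, 150, 350 };

        assert(which_dev_idx(end, 3, 0)   == 0);
        assert(which_dev_idx(end, 3, 99)  == 0);
        assert(which_dev_idx(end, 3, 100) == 1); /* first sector of disk 1 */
        assert(which_dev_idx(end, 3, 349) == 2); /* last sector of the array */
        return 0;
}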
 
 /**
@@ -55,8 +67,10 @@ static int linear_mergeable_bvec(struct request_queue *q,
        unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 
+       rcu_read_lock();
        dev0 = which_dev(mddev, sector);
-       maxsectors = dev0->num_sectors - (sector - dev0->start_sector);
+       maxsectors = dev0->end_sector - sector;
+       rcu_read_unlock();
 
        if (maxsectors < bio_sectors)
                maxsectors = 0;
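
The new maxsectors expression is a pure algebraic simplification of the old one: since start_sector + num_sectors == end_sector for every device, num_sectors - (sector - start_sector) equals end_sector - sector. A quick check with hypothetical values:

#include <assert.h>

int main(void)
{
        /* hypothetical member spanning array sectors [1000, 1500) */
        unsigned long long start = 1000, num = 500, end = start + num;
        unsigned long long sector = 1200;

        /* old form: 500 - (1200 - 1000) = 300
         * new form: 1500 - 1200         = 300 */
        assert(num - (sector - start) == end - sector);
        return 0;
}
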
@@ -75,36 +89,60 @@ static int linear_mergeable_bvec(struct request_queue *q,
 static void linear_unplug(struct request_queue *q)
 {
        mddev_t *mddev = q->queuedata;
-       linear_conf_t *conf = mddev_to_conf(mddev);
+       linear_conf_t *conf;
        int i;
 
+       rcu_read_lock();
+       conf = rcu_dereference(mddev->private);
+
        for (i=0; i < mddev->raid_disks; i++) {
                struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
                blk_unplug(r_queue);
        }
+       rcu_read_unlock();
 }
 
 static int linear_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
-       linear_conf_t *conf = mddev_to_conf(mddev);
+       linear_conf_t *conf;
        int i, ret = 0;
 
+       if (mddev_congested(mddev, bits))
+               return 1;
+
+       rcu_read_lock();
+       conf = rcu_dereference(mddev->private);
+
        for (i = 0; i < mddev->raid_disks && !ret ; i++) {
                struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
                ret |= bdi_congested(&q->backing_dev_info, bits);
        }
+
+       rcu_read_unlock();
        return ret;
 }
 
+static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+{
+       linear_conf_t *conf;
+       sector_t array_sectors;
+
+       rcu_read_lock();
+       conf = rcu_dereference(mddev->private);
+       WARN_ONCE(sectors || raid_disks,
+                 "%s does not support generic reshape\n", __func__);
+       array_sectors = conf->array_sectors;
+       rcu_read_unlock();
+
+       return array_sectors;
+}
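
linear_unplug(), linear_congested() and linear_size() now all follow the same RCU read-side discipline: mddev->private may only be dereferenced between rcu_read_lock() and rcu_read_unlock(). A schematic of the rule (kernel context, not a standalone program):

/* the shape every reader in this file now takes */
static void conf_reader(mddev_t *mddev)
{
        linear_conf_t *conf;

        rcu_read_lock();
        conf = rcu_dereference(mddev->private);
        /* conf is safe to use here: linear_add() may publish a
         * replacement concurrently, but the old conf is not freed
         * until all such critical sections have ended */
        rcu_read_unlock();
        /* using conf past this point would be a use-after-free */
}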
+
 static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 {
        linear_conf_t *conf;
-       dev_info_t **table;
        mdk_rdev_t *rdev;
-       int i, nb_zone, cnt;
-       sector_t min_sectors;
-       sector_t curr_sector;
+       int i, cnt;
 
        conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
                        GFP_KERNEL);
@@ -117,6 +155,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                int j = rdev->raid_disk;
                dev_info_t *disk = conf->disks + j;
+               sector_t sectors;
 
                if (j < 0 || j >= raid_disks || disk->rdev) {
                        printk("linear: disk numbering problem. Aborting!\n");
@@ -124,113 +163,42 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
                }
 
                disk->rdev = rdev;
+               if (mddev->chunk_sectors) {
+                       sectors = rdev->sectors;
+                       sector_div(sectors, mddev->chunk_sectors);
+                       rdev->sectors = sectors * mddev->chunk_sectors;
+               }
 
-               blk_queue_stack_limits(mddev->queue,
-                                      rdev->bdev->bd_disk->queue);
+               disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                 rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit ->max_sector to one PAGE, as
-                * a one page request is never in violation.
+                * violating it, so limit max_segments to 1 lying within
+                * a single page.
                 */
-               if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                   mddev->queue->max_sectors > (PAGE_SIZE>>9))
-                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
-
-               disk->num_sectors = rdev->size * 2;
-               conf->array_sectors += rdev->size * 2;
+               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+                       blk_queue_max_segments(mddev->queue, 1);
+                       blk_queue_segment_boundary(mddev->queue,
+                                                  PAGE_CACHE_SIZE - 1);
+               }
 
+               conf->array_sectors += rdev->sectors;
                cnt++;
+
        }
        if (cnt != raid_disks) {
                printk("linear: not enough drives present. Aborting!\n");
                goto out;
        }
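
The chunk_sectors handling added above rounds each member down to a whole number of chunks; sector_div(a, b) sets a = a / b and returns the remainder. With hypothetical numbers, a 1000-sector member and 64-sector chunks give 1000 / 64 = 15 whole chunks, so the member is trimmed to 960 sectors and its 40-sector tail is ignored:

#include <assert.h>

int main(void)
{
        /* hypothetical values; plain C division models sector_div() */
        unsigned long long rdev_sectors = 1000, chunk_sectors = 64;
        unsigned long long chunks = rdev_sectors / chunk_sectors;  /* 15 */

        rdev_sectors = chunks * chunk_sectors;                     /* 960 */
        assert(rdev_sectors == 960);
        assert(rdev_sectors % chunk_sectors == 0);
        return 0;
}
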
 
-       min_sectors = conf->array_sectors;
-       sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *));
-       if (min_sectors == 0)
-               min_sectors = 1;
-
-       /* min_sectors is the minimum spacing that will fit the hash
-        * table in one PAGE.  This may be much smaller than needed.
-        * We find the smallest non-terminal set of consecutive devices
-        * that is larger than min_sectors and use the size of that as
-        * the actual spacing
-        */
-       conf->spacing = conf->array_sectors;
-       for (i=0; i < cnt-1 ; i++) {
-               sector_t tmp = 0;
-               int j;
-               for (j = i; j < cnt - 1 && tmp < min_sectors; j++)
-                       tmp += conf->disks[j].num_sectors;
-               if (tmp >= min_sectors && tmp < conf->spacing)
-                       conf->spacing = tmp;
-       }
-
-       /* spacing may be too large for sector_div to work with,
-        * so we might need to pre-shift
-        */
-       conf->sector_shift = 0;
-       if (sizeof(sector_t) > sizeof(u32)) {
-               sector_t space = conf->spacing;
-               while (space > (sector_t)(~(u32)0)) {
-                       space >>= 1;
-                       conf->sector_shift++;
-               }
-       }
        /*
-        * This code was restructured to work around a gcc-2.95.3 internal
-        * compiler error.  Alter it with care.
+        * Here we calculate the device offsets.
         */
-       {
-               sector_t sz;
-               unsigned round;
-               unsigned long base;
-
-               sz = conf->array_sectors >> conf->sector_shift;
-               sz += 1; /* force round-up */
-               base = conf->spacing >> conf->sector_shift;
-               round = sector_div(sz, base);
-               nb_zone = sz + (round ? 1 : 0);
-       }
-       BUG_ON(nb_zone > PAGE_SIZE / sizeof(struct dev_info *));
+       conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
 
-       conf->hash_table = kmalloc (sizeof (struct dev_info *) * nb_zone,
-                                       GFP_KERNEL);
-       if (!conf->hash_table)
-               goto out;
-
-       /*
-        * Here we generate the linear hash table
-        * First calculate the device offsets.
-        */
-       conf->disks[0].start_sector = 0;
        for (i = 1; i < raid_disks; i++)
-               conf->disks[i].start_sector =
-                       conf->disks[i-1].start_sector +
-                       conf->disks[i-1].num_sectors;
-
-       table = conf->hash_table;
-       i = 0;
-       for (curr_sector = 0;
-            curr_sector < conf->array_sectors;
-            curr_sector += conf->spacing) {
-
-               while (i < raid_disks-1 &&
-                      curr_sector >= conf->disks[i+1].start_sector)
-                       i++;
-
-               *table ++ = conf->disks + i;
-       }
-
-       if (conf->sector_shift) {
-               conf->spacing >>= conf->sector_shift;
-               /* round spacing up so that when we divide by it,
-                * we err on the side of "too-low", which is safest.
-                */
-               conf->spacing++;
-       }
-
-       BUG_ON(table - conf->hash_table > nb_zone);
+               conf->disks[i].end_sector =
+                       conf->disks[i-1].end_sector +
+                       conf->disks[i].rdev->sectors;
 
        return conf;
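
The offsets loop above replaces per-device (start, length) pairs and the hash table with one cumulative end_sector per device: a running prefix sum of member sizes. For the hypothetical 100/50/200-sector members used earlier this produces end_sector = 100, 150, 350, exactly the array the binary search in which_dev() walks:

#include <assert.h>

int main(void)
{
        /* hypothetical member sizes in sectors */
        unsigned long long rdev_sectors[] = { 100, 50, 200 };
        unsigned long long end_sector[3];
        int i;

        end_sector[0] = rdev_sectors[0];
        for (i = 1; i < 3; i++)
                end_sector[i] = end_sector[i - 1] + rdev_sectors[i];

        assert(end_sector[2] == 350);   /* == conf->array_sectors */
        return 0;
}
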
 
@@ -243,21 +211,30 @@ static int linear_run (mddev_t *mddev)
 {
        linear_conf_t *conf;
 
+       if (md_check_no_bitmap(mddev))
+               return -EINVAL;
        mddev->queue->queue_lock = &mddev->queue->__queue_lock;
        conf = linear_conf(mddev, mddev->raid_disks);
 
        if (!conf)
                return 1;
        mddev->private = conf;
-       mddev->array_sectors = conf->array_sectors;
+       md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
 
        blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
        mddev->queue->unplug_fn = linear_unplug;
        mddev->queue->backing_dev_info.congested_fn = linear_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;
+       md_integrity_register(mddev);
        return 0;
 }
 
+static void free_conf(struct rcu_head *head)
+{
+       linear_conf_t *conf = container_of(head, linear_conf_t, rcu);
+       kfree(conf);
+}
+
 static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        /* Adding a drive to a linear array allows the array to grow.
@@ -268,7 +245,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
         * The current one is never freed until the array is stopped.
         * This avoids races.
         */
-       linear_conf_t *newconf;
+       linear_conf_t *newconf, *oldconf;
 
        if (rdev->saved_raid_disk != mddev->raid_disks)
                return -EINVAL;
@@ -280,25 +257,31 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
        if (!newconf)
                return -ENOMEM;
 
-       newconf->prev = mddev_to_conf(mddev);
-       mddev->private = newconf;
+       oldconf = rcu_dereference(mddev->private);
        mddev->raid_disks++;
-       mddev->array_sectors = newconf->array_sectors;
+       rcu_assign_pointer(mddev->private, newconf);
+       md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
        set_capacity(mddev->gendisk, mddev->array_sectors);
+       revalidate_disk(mddev->gendisk);
+       call_rcu(&oldconf->rcu, free_conf);
        return 0;
 }
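
The update side pairs with the readers above: linear_add() runs under the mddev reconfig_mutex, so writers are serialized and only RCU readers run concurrently. A sketch of the publish-then-defer-free sequence (kernel context):

/* schematic of the pattern used in linear_add() above */
static void publish_conf(mddev_t *mddev, linear_conf_t *newconf)
{
        linear_conf_t *oldconf = rcu_dereference(mddev->private);

        /* rcu_assign_pointer() orders the stores that initialized
         * newconf before the pointer becomes visible to readers */
        rcu_assign_pointer(mddev->private, newconf);

        /* defer the free until every reader that may still hold
         * oldconf has left its rcu_read_lock() section */
        call_rcu(&oldconf->rcu, free_conf);
}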
 
 static int linear_stop (mddev_t *mddev)
 {
-       linear_conf_t *conf = mddev_to_conf(mddev);
-  
+       linear_conf_t *conf = mddev->private;
+
+       /*
+        * We do not require rcu protection here since
+        * we hold reconfig_mutex for both linear_add and
+        * linear_stop, so they cannot race.
+        * We should make sure any old 'conf's are properly
+        * freed though.
+        */
+       rcu_barrier();
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
-       do {
-               linear_conf_t *t = conf->prev;
-               kfree(conf->hash_table);
-               kfree(conf);
-               conf = t;
-       } while (conf);
+       kfree(conf);
+       mddev->private = NULL;
 
        return 0;
 }
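
The rcu_barrier() here is the counterpart of the call_rcu() in linear_add(): synchronize_rcu() would only wait for readers to drain, while rcu_barrier() also waits for every pending call_rcu() callback (any queued free_conf()) to finish, so the final kfree() cannot race a callback still in flight. In outline:

/* sketch of the teardown ordering in linear_stop() above */
static void teardown_conf(mddev_t *mddev)
{
        linear_conf_t *conf = mddev->private; /* mutex held: no writer race */

        rcu_barrier();  /* flush every queued free_conf(); plain
                         * synchronize_rcu() would not wait for
                         * callbacks already handed to call_rcu() */
        kfree(conf);    /* the current conf was never handed to RCU */
        mddev->private = NULL;
}
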
@@ -308,10 +291,11 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
        const int rw = bio_data_dir(bio);
        mddev_t *mddev = q->queuedata;
        dev_info_t *tmp_dev;
+       sector_t start_sector;
        int cpu;
 
-       if (unlikely(bio_barrier(bio))) {
-               bio_endio(bio, -EOPNOTSUPP);
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+               md_barrier_request(mddev, bio);
                return 0;
        }
 
@@ -321,33 +305,36 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
                      bio_sectors(bio));
        part_stat_unlock();
 
+       rcu_read_lock();
        tmp_dev = which_dev(mddev, bio->bi_sector);
-    
-       if (unlikely(bio->bi_sector >= (tmp_dev->num_sectors +
-                                       tmp_dev->start_sector)
-                    || (bio->bi_sector <
-                        tmp_dev->start_sector))) {
+       start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+
+       if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
+                    || (bio->bi_sector < start_sector))) {
                char b[BDEVNAME_SIZE];
 
                printk("linear_make_request: Sector %llu out of bounds on "
                        "dev %s: %llu sectors, offset %llu\n",
                        (unsigned long long)bio->bi_sector,
                        bdevname(tmp_dev->rdev->bdev, b),
-                       (unsigned long long)tmp_dev->num_sectors,
-                       (unsigned long long)tmp_dev->start_sector);
+                       (unsigned long long)tmp_dev->rdev->sectors,
+                       (unsigned long long)start_sector);
+               rcu_read_unlock();
                bio_io_error(bio);
                return 0;
        }
        if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
-                    tmp_dev->start_sector + tmp_dev->num_sectors)) {
+                    tmp_dev->end_sector)) {
                /* This bio crosses a device boundary, so we have to
                 * split it.
                 */
                struct bio_pair *bp;
+               sector_t end_sector = tmp_dev->end_sector;
+
+               rcu_read_unlock();
 
-               bp = bio_split(bio,
-                              tmp_dev->start_sector + tmp_dev->num_sectors
-                              - bio->bi_sector);
+               bp = bio_split(bio, end_sector - bio->bi_sector);
 
                if (linear_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
@@ -358,8 +345,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
        }
                    
        bio->bi_bdev = tmp_dev->rdev->bdev;
-       bio->bi_sector = bio->bi_sector - tmp_dev->start_sector
+       bio->bi_sector = bio->bi_sector - start_sector
                + tmp_dev->rdev->data_offset;
+       rcu_read_unlock();
 
        return 1;
 }
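
The tail of linear_make_request() is two steps of sector arithmetic: split at the device's end_sector if the bio crosses it, then rebase the in-range bio onto the member by subtracting start_sector (recomputed as end_sector - rdev->sectors) and adding data_offset. Worked through with hypothetical numbers:

#include <assert.h>

int main(void)
{
        /* hypothetical: second member covers array sectors [100, 150),
         * is 50 sectors long, and its data starts at device sector 8 */
        unsigned long long end_sector = 150, rdev_sectors = 50;
        unsigned long long data_offset = 8;
        unsigned long long start_sector = end_sector - rdev_sectors; /* 100 */

        /* a 40-sector bio at array sector 120 would reach 160 > 150,
         * so bio_split() keeps end_sector - bi_sector = 30 sectors */
        unsigned long long bi_sector = 120;
        assert(end_sector - bi_sector == 30);

        /* remapping the in-range bio onto the member device */
        assert(bi_sector - start_sector + data_offset == 28);
        return 0;
}
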
@@ -367,7 +355,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 static void linear_status (struct seq_file *seq, mddev_t *mddev)
 {
 
-       seq_printf(seq, " %dk rounding", mddev->chunk_size/1024);
+       seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
 }
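
The status change is a unit conversion, not a behaviour change: chunk_size was in bytes, chunk_sectors is in 512-byte sectors, and the printed figure is KiB, so chunk_size / 1024 == chunk_sectors / 2. For a hypothetical 64 KiB chunk:

#include <assert.h>

int main(void)
{
        int chunk_size = 65536;               /* bytes */
        int chunk_sectors = chunk_size >> 9;  /* 128 sectors of 512 B */

        assert(chunk_size / 1024 == chunk_sectors / 2);  /* both 64 */
        return 0;
}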
 
 
@@ -381,6 +369,7 @@ static struct mdk_personality linear_personality =
        .stop           = linear_stop,
        .status         = linear_status,
        .hot_add_disk   = linear_add,
+       .size           = linear_size,
 };
 
 static int __init linear_init (void)
@@ -397,6 +386,7 @@ static void linear_exit (void)
 module_init(linear_init);
 module_exit(linear_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Linear device concatenation personality for MD");
 MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
 MODULE_ALIAS("md-linear");
 MODULE_ALIAS("md-level--1");