md: don't unregister the thread in mddev_suspend
[safe/jmp/linux-2.6] / drivers / md / raid0.c
index a1f7147..9f9c6b7 100644 (file)
 #include <linux/seq_file.h>
 #include "md.h"
 #include "raid0.h"
+#include "raid5.h"
 
 static void raid0_unplug(struct request_queue *q)
 {
        mddev_t *mddev = q->queuedata;
        raid0_conf_t *conf = mddev->private;
        mdk_rdev_t **devlist = conf->devlist;
+       int raid_disks = conf->strip_zone[0].nb_dev;
        int i;
 
-       for (i=0; i<mddev->raid_disks; i++) {
+       for (i=0; i < raid_disks; i++) {
                struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
                blk_unplug(r_queue);
@@ -42,12 +44,13 @@ static int raid0_congested(void *data, int bits)
        mddev_t *mddev = data;
        raid0_conf_t *conf = mddev->private;
        mdk_rdev_t **devlist = conf->devlist;
+       int raid_disks = conf->strip_zone[0].nb_dev;
        int i, ret = 0;
 
        if (mddev_congested(mddev, bits))
                return 1;
 
-       for (i = 0; i < mddev->raid_disks && !ret ; i++) {
+       for (i = 0; i < raid_disks && !ret ; i++) {
                struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 
                ret |= bdi_congested(&q->backing_dev_info, bits);
@@ -65,6 +68,7 @@ static void dump_zones(mddev_t *mddev)
        sector_t zone_start = 0;
        char b[BDEVNAME_SIZE];
        raid0_conf_t *conf = mddev->private;
+       int raid_disks = conf->strip_zone[0].nb_dev;
        printk(KERN_INFO "******* %s configuration *********\n",
                mdname(mddev));
        h = 0;
@@ -72,7 +76,7 @@ static void dump_zones(mddev_t *mddev)
                printk(KERN_INFO "zone%d=[", j);
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        printk("%s/",
-                       bdevname(conf->devlist[j*mddev->raid_disks
+                       bdevname(conf->devlist[j*raid_disks
                                                + k]->bdev, b));
                printk("]\n");
 
@@ -87,7 +91,7 @@ static void dump_zones(mddev_t *mddev)
        printk(KERN_INFO "**********************************\n\n");
 }
 
-static int create_strip_zones(mddev_t *mddev)
+static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 {
        int i, c, err;
        sector_t curr_zone_end, sectors;
@@ -161,6 +165,10 @@ static int create_strip_zones(mddev_t *mddev)
        list_for_each_entry(rdev1, &mddev->disks, same_set) {
                int j = rdev1->raid_disk;
 
+               if (mddev->level == 10)
+                       /* taking over a raid10-n2 array */
+                       j /= 2;
+
                if (j < 0 || j >= mddev->raid_disks) {
                        printk(KERN_ERR "raid0: bad disk number %d - "
                                "aborting!\n", j);
@@ -176,14 +184,15 @@ static int create_strip_zones(mddev_t *mddev)
                disk_stack_limits(mddev->gendisk, rdev1->bdev,
                                  rdev1->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit ->max_sector to one PAGE, as
-                * a one page request is never in violation.
+                * violating it, so limit ->max_segments to 1, lying within
+                * a single page.
                 */
 
-               if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                       blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
-
+               if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
+                       blk_queue_max_segments(mddev->queue, 1);
+                       blk_queue_segment_boundary(mddev->queue,
+                                                  PAGE_CACHE_SIZE - 1);
+               }
                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;
@@ -260,13 +269,14 @@ static int create_strip_zones(mddev_t *mddev)
                         (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
        printk(KERN_INFO "raid0: done.\n");
-       mddev->private = conf;
+       *private_conf = conf;
+
        return 0;
 abort:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
-       mddev->private = NULL;
+       *private_conf = NULL;
        return err;
 }
 
@@ -317,6 +327,7 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 
 static int raid0_run(mddev_t *mddev)
 {
+       raid0_conf_t *conf;
        int ret;
 
        if (mddev->chunk_sectors == 0) {
@@ -328,9 +339,20 @@ static int raid0_run(mddev_t *mddev)
        blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
        mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
-       ret = create_strip_zones(mddev);
-       if (ret < 0)
-               return ret;
+       /* if private is not null, we are here after takeover */
+       if (mddev->private == NULL) {
+               ret = create_strip_zones(mddev, &conf);
+               if (ret < 0)
+                       return ret;
+               mddev->private = conf;
+       }
+       conf = mddev->private;
+       if (conf->scale_raid_disks) {
+               int i;
+               for (i=0; i < conf->strip_zone[0].nb_dev; i++)
+                       conf->devlist[i]->raid_disk /= conf->scale_raid_disks;
+               /* FIXME update sysfs rd links */
+       }
 
        /* calculate array device size */
        md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
@@ -400,6 +422,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
        unsigned int sect_in_chunk;
        sector_t chunk;
        raid0_conf_t *conf = mddev->private;
+       int raid_disks = conf->strip_zone[0].nb_dev;
        unsigned int chunk_sects = mddev->chunk_sectors;
 
        if (is_power_of_2(chunk_sects)) {
@@ -422,7 +445,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
        *       + the position in the chunk
        */
        *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
-       return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
+       return conf->devlist[(zone - conf->strip_zone)*raid_disks
                             + sector_div(sector, zone->nb_dev)];
 }
 
@@ -442,27 +465,18 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
        }
 }
 
-static int raid0_make_request(struct request_queue *q, struct bio *bio)
+static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 {
-       mddev_t *mddev = q->queuedata;
        unsigned int chunk_sects;
        sector_t sector_offset;
        struct strip_zone *zone;
        mdk_rdev_t *tmp_dev;
-       const int rw = bio_data_dir(bio);
-       int cpu;
 
        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                md_barrier_request(mddev, bio);
                return 0;
        }
 
-       cpu = part_stat_lock();
-       part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-                     bio_sectors(bio));
-       part_stat_unlock();
-
        chunk_sects = mddev->chunk_sectors;
        if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
                sector_t sector = bio->bi_sector;
@@ -480,9 +494,9 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
                else
                        bp = bio_split(bio, chunk_sects -
                                       sector_div(sector, chunk_sects));
-               if (raid0_make_request(q, &bp->bio1))
+               if (raid0_make_request(mddev, &bp->bio1))
                        generic_make_request(&bp->bio1);
-               if (raid0_make_request(q, &bp->bio2))
+               if (raid0_make_request(mddev, &bp->bio2))
                        generic_make_request(&bp->bio2);
 
                bio_pair_release(bp);
@@ -517,6 +531,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
        int j, k, h;
        char b[BDEVNAME_SIZE];
        raid0_conf_t *conf = mddev->private;
+       int raid_disks = conf->strip_zone[0].nb_dev;
 
        sector_t zone_size;
        sector_t zone_start = 0;
@@ -527,7 +542,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
                seq_printf(seq, "=[");
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        seq_printf(seq, "%s/", bdevname(
-                               conf->devlist[j*mddev->raid_disks + k]
+                               conf->devlist[j*raid_disks + k]
                                                ->bdev, b));
 
                zone_size  = conf->strip_zone[j].zone_end - zone_start;
@@ -542,6 +557,99 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
        return;
 }
 
+/*
+ * raid0_takeover_raid5 - convert a degraded raid4/5 (parity-last) array
+ * to raid0 by reusing its data disks as a stripe set.
+ *
+ * Preconditions checked here: exactly one disk is missing, and the
+ * missing slot is the parity disk (the last raid_disk slot), so the
+ * surviving members hold pure striped data.
+ *
+ * Returns the new raid0 private conf, or an ERR_PTR() on failure.
+ */
+static void *raid0_takeover_raid5(mddev_t *mddev)
+{
+       mdk_rdev_t *rdev;
+       raid0_conf_t *priv_conf;
+       int err;
+
+       if (mddev->degraded != 1) {
+               printk(KERN_ERR "md: raid5 must be degraded! Degraded disks: %d\n",
+                      mddev->degraded);
+               return ERR_PTR(-EINVAL);
+       }
+
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
+               /* if any active disk occupies the parity slot, the
+                * missing disk is a data disk - cannot take over */
+               if (rdev->raid_disk == mddev->raid_disks-1) {
+                       printk(KERN_ERR "md: raid5 must have missing parity disk!\n");
+                       return ERR_PTR(-EINVAL);
+               }
+       }
+
+       /* Set new parameters */
+       mddev->new_level = 0;
+       mddev->new_chunk_sectors = mddev->chunk_sectors;
+       mddev->raid_disks--;
+       mddev->delta_disks = -1;
+       /* make sure it will be not marked as dirty */
+       mddev->recovery_cp = MaxSector;
+
+       /* propagate allocation/setup failure instead of handing the
+        * caller a NULL conf as if takeover had succeeded */
+       err = create_strip_zones(mddev, &priv_conf);
+       if (err < 0)
+               return ERR_PTR(err);
+       return priv_conf;
+}
+
+/*
+ * raid0_takeover_raid10 - convert a raid10-n2 array (near_copies=2,
+ * far_copies=1) with every mirror already degraded into raid0.
+ *
+ * Each mirror pair collapses to a single raid0 member, halving
+ * raid_disks; scale_raid_disks tells raid0_run to divide the
+ * surviving disks' raid_disk slots by 2.
+ *
+ * Returns the new raid0 private conf, or an ERR_PTR() on failure.
+ */
+static void *raid0_takeover_raid10(mddev_t *mddev)
+{
+       raid0_conf_t *priv_conf;
+       int err;
+
+       /* Check layout:
+        *  - far_copies must be 1
+        *  - near_copies must be 2
+        *  - disks number must be even
+        *  - all mirrors must be already degraded
+        */
+       if (mddev->layout != ((1 << 8) + 2)) {
+               printk(KERN_ERR "md: Raid0 cannot takeover layout: %x\n",
+                      mddev->layout);
+               return ERR_PTR(-EINVAL);
+       }
+       if (mddev->raid_disks & 1) {
+               printk(KERN_ERR "md: Raid0 cannot takeover Raid10 with odd disk number.\n");
+               return ERR_PTR(-EINVAL);
+       }
+       if (mddev->degraded != (mddev->raid_disks>>1)) {
+               printk(KERN_ERR "md: All mirrors must be already degraded!\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* Set new parameters */
+       mddev->new_level = 0;
+       mddev->new_chunk_sectors = mddev->chunk_sectors;
+       mddev->delta_disks = - mddev->raid_disks / 2;
+       mddev->raid_disks += mddev->delta_disks;
+       mddev->degraded = 0;
+       /* make sure it will be not marked as dirty */
+       mddev->recovery_cp = MaxSector;
+
+       /* check the result before touching priv_conf: on failure it is
+        * NULL and dereferencing it below would oops */
+       err = create_strip_zones(mddev, &priv_conf);
+       if (err < 0)
+               return ERR_PTR(err);
+       priv_conf->scale_raid_disks = 2;
+       return priv_conf;
+}
+
+/*
+ * raid0_takeover - personality takeover entry point.
+ *
+ * Dispatches to the level-specific conversion helper, or returns
+ * ERR_PTR(-EINVAL) when the source array cannot become raid0.
+ */
+static void *raid0_takeover(mddev_t *mddev)
+{
+       /* raid0 can take over:
+        *  raid5 - providing it is Raid4 layout and one disk is faulty
+        *  raid10 - assuming we have all necessary active disks
+        */
+       if (mddev->level == 5) {
+               if (mddev->layout == ALGORITHM_PARITY_N)
+                       return raid0_takeover_raid5(mddev);
+
+               /* report both the required and the offending layout */
+               printk(KERN_ERR "md: Raid0 can only takeover Raid5 with layout: %d, found: %d\n",
+                      ALGORITHM_PARITY_N, mddev->layout);
+       }
+
+       if (mddev->level == 10)
+               return raid0_takeover_raid10(mddev);
+
+       return ERR_PTR(-EINVAL);
+}
+
+/* raid0 holds no internal state to flush or freeze, so quiesce is a
+ * deliberate no-op; the hook must exist for the md core to call the
+ * personality's ->quiesce unconditionally (e.g. around takeover). */
+static void raid0_quiesce(mddev_t *mddev, int state)
+{
+}
+
 static struct mdk_personality raid0_personality=
 {
        .name           = "raid0",
@@ -552,6 +660,8 @@ static struct mdk_personality raid0_personality=
        .stop           = raid0_stop,
        .status         = raid0_status,
        .size           = raid0_size,
+       .takeover       = raid0_takeover,
+       .quiesce        = raid0_quiesce,
 };
 
 static int __init raid0_init (void)