include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f675725..c3bec02 100644
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
 */
 
-#include <linux/module.h>
-#include <linux/raid/raid0.h>
+#include <linux/blkdev.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include "md.h"
+#include "raid0.h"
 
-#define MAJOR_NR MD_MAJOR
-#define MD_DRIVER
-#define MD_PERSONALITY
-
-static void raid0_unplug(request_queue_t *q)
+static void raid0_unplug(struct request_queue *q)
 {
        mddev_t *mddev = q->queuedata;
-       raid0_conf_t *conf = mddev_to_conf(mddev);
-       mdk_rdev_t **devlist = conf->strip_zone[0].dev;
+       raid0_conf_t *conf = mddev->private;
+       mdk_rdev_t **devlist = conf->devlist;
        int i;
 
        for (i=0; i<mddev->raid_disks; i++) {
-               request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);
+               struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
-               if (r_queue->unplug_fn)
-                       r_queue->unplug_fn(r_queue);
+               blk_unplug(r_queue);
        }
 }
 
-static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
-                            sector_t *error_sector)
+static int raid0_congested(void *data, int bits)
 {
-       mddev_t *mddev = q->queuedata;
-       raid0_conf_t *conf = mddev_to_conf(mddev);
-       mdk_rdev_t **devlist = conf->strip_zone[0].dev;
+       mddev_t *mddev = data;
+       raid0_conf_t *conf = mddev->private;
+       mdk_rdev_t **devlist = conf->devlist;
        int i, ret = 0;
 
-       for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-               struct block_device *bdev = devlist[i]->bdev;
-               request_queue_t *r_queue = bdev_get_queue(bdev);
+       if (mddev_congested(mddev, bits))
+               return 1;
 
-               if (!r_queue->issue_flush_fn)
-                       ret = -EOPNOTSUPP;
-               else
-                       ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
+       for (i = 0; i < mddev->raid_disks && !ret ; i++) {
+               struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
+
+               ret |= bdi_congested(&q->backing_dev_info, bits);
        }
        return ret;
 }
 
+/*
+ * Inform the user of the raid configuration.
+ */
+static void dump_zones(mddev_t *mddev)
+{
+       int j, k, h;
+       sector_t zone_size = 0;
+       sector_t zone_start = 0;
+       char b[BDEVNAME_SIZE];
+       raid0_conf_t *conf = mddev->private;
+       printk(KERN_INFO "******* %s configuration *********\n",
+               mdname(mddev));
+       h = 0;
+       for (j = 0; j < conf->nr_strip_zones; j++) {
+               printk(KERN_INFO "zone%d=[", j);
+               for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
+                       printk("%s/",
+                       bdevname(conf->devlist[j*mddev->raid_disks
+                                               + k]->bdev, b));
+               printk("]\n");
+
+               zone_size  = conf->strip_zone[j].zone_end - zone_start;
+               printk(KERN_INFO "        zone offset=%llukb "
+                               "device offset=%llukb size=%llukb\n",
+                       (unsigned long long)zone_start>>1,
+                       (unsigned long long)conf->strip_zone[j].dev_start>>1,
+                       (unsigned long long)zone_size>>1);
+               zone_start = conf->strip_zone[j].zone_end;
+       }
+       printk(KERN_INFO "**********************************\n\n");
+}
 
-static int create_strip_zones (mddev_t *mddev)
+static int create_strip_zones(mddev_t *mddev)
 {
-       int i, c, j;
-       sector_t current_offset, curr_zone_offset;
-       sector_t min_spacing;
-       raid0_conf_t *conf = mddev_to_conf(mddev);
-       mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
-       struct list_head *tmp1, *tmp2;
+       int i, c, err;
+       sector_t curr_zone_end, sectors;
+       mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
        struct strip_zone *zone;
        int cnt;
        char b[BDEVNAME_SIZE];
-       /*
-        * The number of 'same size groups'
-        */
-       conf->nr_strip_zones = 0;
-       ITERATE_RDEV(mddev,rdev1,tmp1) {
-               printk("raid0: looking at %s\n",
+       raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+
+       if (!conf)
+               return -ENOMEM;
+       list_for_each_entry(rdev1, &mddev->disks, same_set) {
+               printk(KERN_INFO "raid0: looking at %s\n",
                        bdevname(rdev1->bdev,b));
                c = 0;
-               ITERATE_RDEV(mddev,rdev2,tmp2) {
-                       printk("raid0:   comparing %s(%llu)",
+
+               /* round size to chunk_size */
+               sectors = rdev1->sectors;
+               sector_div(sectors, mddev->chunk_sectors);
+               rdev1->sectors = sectors * mddev->chunk_sectors;
+
+               list_for_each_entry(rdev2, &mddev->disks, same_set) {
+                       printk(KERN_INFO "raid0:   comparing %s(%llu)",
                               bdevname(rdev1->bdev,b),
-                              (unsigned long long)rdev1->size);
-                       printk(" with %s(%llu)\n",
+                              (unsigned long long)rdev1->sectors);
+                       printk(KERN_INFO " with %s(%llu)\n",
                               bdevname(rdev2->bdev,b),
-                              (unsigned long long)rdev2->size);
+                              (unsigned long long)rdev2->sectors);
                        if (rdev2 == rdev1) {
-                               printk("raid0:   END\n");
+                               printk(KERN_INFO "raid0:   END\n");
                                break;
                        }
-                       if (rdev2->size == rdev1->size)
-                       {
+                       if (rdev2->sectors == rdev1->sectors) {
                                /*
                                 * Not unique, don't count it as a new
                                 * group
                                 */
-                               printk("raid0:   EQUAL\n");
+                               printk(KERN_INFO "raid0:   EQUAL\n");
                                c = 1;
                                break;
                        }
-                       printk("raid0:   NOT EQUAL\n");
+                       printk(KERN_INFO "raid0:   NOT EQUAL\n");
                }
                if (!c) {
-                       printk("raid0:   ==> UNIQUE\n");
+                       printk(KERN_INFO "raid0:   ==> UNIQUE\n");
                        conf->nr_strip_zones++;
-                       printk("raid0: %d zones\n", conf->nr_strip_zones);
+                       printk(KERN_INFO "raid0: %d zones\n",
+                               conf->nr_strip_zones);
                }
        }
-       printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
-
-       conf->strip_zone = kmalloc(sizeof(struct strip_zone)*
+       printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
+       err = -ENOMEM;
+       conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
                                conf->nr_strip_zones, GFP_KERNEL);
        if (!conf->strip_zone)
-               return 1;
-       conf->devlist = kmalloc(sizeof(mdk_rdev_t*)*
+               goto abort;
+       conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
                                conf->nr_strip_zones*mddev->raid_disks,
                                GFP_KERNEL);
        if (!conf->devlist)
-               return 1;
-
-       memset(conf->strip_zone, 0,sizeof(struct strip_zone)*
-                                  conf->nr_strip_zones);
-       memset(conf->devlist, 0,
-              sizeof(mdk_rdev_t*) * conf->nr_strip_zones * mddev->raid_disks);
+               goto abort;
 
        /* The first zone must contain all devices, so here we check that
         * there is a proper alignment of slots to devices and find them all
@@ -134,137 +157,145 @@ static int create_strip_zones (mddev_t *mddev)
        zone = &conf->strip_zone[0];
        cnt = 0;
        smallest = NULL;
-       zone->dev = conf->devlist;
-       ITERATE_RDEV(mddev, rdev1, tmp1) {
+       dev = conf->devlist;
+       err = -EINVAL;
+       list_for_each_entry(rdev1, &mddev->disks, same_set) {
                int j = rdev1->raid_disk;
 
                if (j < 0 || j >= mddev->raid_disks) {
-                       printk("raid0: bad disk number %d - aborting!\n", j);
+                       printk(KERN_ERR "raid0: bad disk number %d - "
+                               "aborting!\n", j);
                        goto abort;
                }
-               if (zone->dev[j]) {
-                       printk("raid0: multiple devices for %d - aborting!\n",
-                               j);
+               if (dev[j]) {
+                       printk(KERN_ERR "raid0: multiple devices for %d - "
+                               "aborting!\n", j);
                        goto abort;
                }
-               zone->dev[j] = rdev1;
+               dev[j] = rdev1;
 
-               blk_queue_stack_limits(mddev->queue,
-                                      rdev1->bdev->bd_disk->queue);
+               disk_stack_limits(mddev->gendisk, rdev1->bdev,
+                                 rdev1->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit ->max_sector to one PAGE, as
-                * a one page request is never in violation.
+                * violating it, so limit ->max_segments to 1, lying within
+                * a single page.
                 */
 
-               if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-                   mddev->queue->max_sectors > (PAGE_SIZE>>9))
-                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
-
-               if (!smallest || (rdev1->size <smallest->size))
+               if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
+                       blk_queue_max_segments(mddev->queue, 1);
+                       blk_queue_segment_boundary(mddev->queue,
+                                                  PAGE_CACHE_SIZE - 1);
+               }
+               if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;
        }
        if (cnt != mddev->raid_disks) {
-               printk("raid0: too few disks (%d of %d) - aborting!\n",
-                       cnt, mddev->raid_disks);
+               printk(KERN_ERR "raid0: too few disks (%d of %d) - "
+                       "aborting!\n", cnt, mddev->raid_disks);
                goto abort;
        }
        zone->nb_dev = cnt;
-       zone->size = smallest->size * cnt;
-       zone->zone_offset = 0;
+       zone->zone_end = smallest->sectors * cnt;
 
-       current_offset = smallest->size;
-       curr_zone_offset = zone->size;
+       curr_zone_end = zone->zone_end;
 
        /* now do the other zones */
        for (i = 1; i < conf->nr_strip_zones; i++)
        {
+               int j;
+
                zone = conf->strip_zone + i;
-               zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;
+               dev = conf->devlist + i * mddev->raid_disks;
 
-               printk("raid0: zone %d\n", i);
-               zone->dev_offset = current_offset;
+               printk(KERN_INFO "raid0: zone %d\n", i);
+               zone->dev_start = smallest->sectors;
                smallest = NULL;
                c = 0;
 
                for (j=0; j<cnt; j++) {
-                       char b[BDEVNAME_SIZE];
-                       rdev = conf->strip_zone[0].dev[j];
-                       printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
-                       if (rdev->size > current_offset)
-                       {
-                               printk(" contained as device %d\n", c);
-                               zone->dev[c] = rdev;
-                               c++;
-                               if (!smallest || (rdev->size <smallest->size)) {
-                                       smallest = rdev;
-                                       printk("  (%llu) is smallest!.\n", 
-                                               (unsigned long long)rdev->size);
-                               }
-                       } else
-                               printk(" nope.\n");
+                       rdev = conf->devlist[j];
+                       printk(KERN_INFO "raid0: checking %s ...",
+                               bdevname(rdev->bdev, b));
+                       if (rdev->sectors <= zone->dev_start) {
+                               printk(KERN_INFO " nope.\n");
+                               continue;
+                       }
+                       printk(KERN_INFO " contained as device %d\n", c);
+                       dev[c] = rdev;
+                       c++;
+                       if (!smallest || rdev->sectors < smallest->sectors) {
+                               smallest = rdev;
+                               printk(KERN_INFO "  (%llu) is smallest!.\n",
+                                       (unsigned long long)rdev->sectors);
+                       }
                }
 
                zone->nb_dev = c;
-               zone->size = (smallest->size - current_offset) * c;
-               printk("raid0: zone->nb_dev: %d, size: %llu\n",
-                       zone->nb_dev, (unsigned long long)zone->size);
+               sectors = (smallest->sectors - zone->dev_start) * c;
+               printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
+                       zone->nb_dev, (unsigned long long)sectors);
 
-               zone->zone_offset = curr_zone_offset;
-               curr_zone_offset += zone->size;
+               curr_zone_end += sectors;
+               zone->zone_end = curr_zone_end;
 
-               current_offset = smallest->size;
-               printk("raid0: current zone offset: %llu\n",
-                       (unsigned long long)current_offset);
+               printk(KERN_INFO "raid0: current zone start: %llu\n",
+                       (unsigned long long)smallest->sectors);
        }
+       mddev->queue->unplug_fn = raid0_unplug;
+       mddev->queue->backing_dev_info.congested_fn = raid0_congested;
+       mddev->queue->backing_dev_info.congested_data = mddev;
 
-       /* Now find appropriate hash spacing.
-        * We want a number which causes most hash entries to cover
-        * at most two strips, but the hash table must be at most
-        * 1 PAGE.  We choose the smallest strip, or contiguous collection
-        * of strips, that has big enough size.  We never consider the last
-        * strip though as it's size has no bearing on the efficacy of the hash
-        * table.
+       /*
+        * Now that we have the hard sector sizes, we can make sure the
+        * chunk size is a multiple of that sector size.
         */
-       conf->hash_spacing = curr_zone_offset;
-       min_spacing = curr_zone_offset;
-       sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
-       for (i=0; i < conf->nr_strip_zones-1; i++) {
-               sector_t sz = 0;
-               for (j=i; j<conf->nr_strip_zones-1 &&
-                            sz < min_spacing ; j++)
-                       sz += conf->strip_zone[j].size;
-               if (sz >= min_spacing && sz < conf->hash_spacing)
-                       conf->hash_spacing = sz;
+       if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
+               printk(KERN_ERR "%s chunk_size of %d not valid\n",
+                      mdname(mddev),
+                      mddev->chunk_sectors << 9);
+               goto abort;
        }
 
-       mddev->queue->unplug_fn = raid0_unplug;
-
-       mddev->queue->issue_flush_fn = raid0_issue_flush;
+       blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+       blk_queue_io_opt(mddev->queue,
+                        (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
-       printk("raid0: done.\n");
+       printk(KERN_INFO "raid0: done.\n");
+       mddev->private = conf;
        return 0;
- abort:
-       return 1;
+abort:
+       kfree(conf->strip_zone);
+       kfree(conf->devlist);
+       kfree(conf);
+       mddev->private = NULL;
+       return err;
 }
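
To make the zone bookkeeping above concrete, here is a minimal userspace sketch of the zone_end/dev_start arithmetic, using hypothetical device sizes rather than anything from this patch: zone_end accumulates across zones in array sectors, while dev_start is the offset of a zone inside each member device.

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical array: two 1000-sector devices plus one
             * 4000-sector device (sizes already rounded to the chunk) */
            unsigned long long size[] = { 1000, 1000, 4000 };
            int nr_disks = 3;

            /* zone 0 stripes over all devices up to the smallest size */
            unsigned long long zone0_end = size[0] * nr_disks;  /* 3000 */

            /* zone 1 covers only the largest device, starting where
             * zone 0's per-device slice ended */
            unsigned long long dev_start = size[0];             /* 1000 */
            unsigned long long zone1_end =
                    zone0_end + (size[2] - dev_start) * 1;      /* 6000 */

            printf("zone0: end=%llu; zone1: dev_start=%llu end=%llu\n",
                   zone0_end, dev_start, zone1_end);
            return 0;
    }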
 
 /**
  *     raid0_mergeable_bvec -- tell bio layer if a two requests can be merged
  *     @q: request queue
- *     @bio: the buffer head that's been built up so far
+ *     @bvm: properties of new bio
  *     @biovec: the request that could be merged to it.
  *
  *     Return amount of bytes we can accept at this offset
  */
-static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q,
+                               struct bvec_merge_data *bvm,
+                               struct bio_vec *biovec)
 {
        mddev_t *mddev = q->queuedata;
-       sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+       sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
-       unsigned int chunk_sectors = mddev->chunk_size >> 9;
-       unsigned int bio_sectors = bio->bi_size >> 9;
-
-       max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
+       unsigned int chunk_sectors = mddev->chunk_sectors;
+       unsigned int bio_sectors = bvm->bi_size >> 9;
+
+       if (is_power_of_2(chunk_sectors))
+               max =  (chunk_sectors - ((sector & (chunk_sectors-1))
+                                               + bio_sectors)) << 9;
+       else
+               max =  (chunk_sectors - (sector_div(sector, chunk_sectors)
+                                               + bio_sectors)) << 9;
        if (max < 0) max = 0; /* bio_add cannot handle a negative return */
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
@@ -272,85 +303,42 @@ static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_
                return max;
 }
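
As a worked example of the power-of-2 branch (hypothetical values, not part of the patch): with 128-sector chunks, a bio starting at array sector 1000 is 104 sectors into its chunk, so with 16 sectors already queued it can accept 8 more sectors, i.e. 4096 bytes.

    #include <stdio.h>

    int main(void)
    {
            unsigned int chunk_sectors = 128;  /* 64 KiB chunks, power of 2 */
            unsigned long long sector = 1000;  /* 1000 & 127 = 104 into chunk */
            unsigned int bio_sectors = 16;     /* sectors already in the bio */

            int max = (chunk_sectors - ((sector & (chunk_sectors - 1))
                                        + bio_sectors)) << 9;
            if (max < 0)    /* bio_add cannot handle a negative return */
                    max = 0;
            printf("room left in this chunk: %d bytes\n", max); /* 4096 */
            return 0;
    }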
 
-static int raid0_run (mddev_t *mddev)
+static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 {
-       unsigned  cur=0, i=0, nb_zone;
-       s64 size;
-       raid0_conf_t *conf;
+       sector_t array_sectors = 0;
        mdk_rdev_t *rdev;
-       struct list_head *tmp;
 
-       printk("%s: setting max_sectors to %d, segment boundary to %d\n",
-              mdname(mddev),
-              mddev->chunk_size >> 9,
-              (mddev->chunk_size>>1)-1);
-       blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
-       blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
+       WARN_ONCE(sectors || raid_disks,
+                 "%s does not support generic reshape\n", __func__);
 
-       conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
-       if (!conf)
-               goto out;
-       mddev->private = (void *)conf;
-       conf->strip_zone = NULL;
-       conf->devlist = NULL;
-       if (create_strip_zones (mddev)) 
-               goto out_free_conf;
+       list_for_each_entry(rdev, &mddev->disks, same_set)
+               array_sectors += rdev->sectors;
 
-       /* calculate array device size */
-       mddev->array_size = 0;
-       ITERATE_RDEV(mddev,rdev,tmp)
-               mddev->array_size += rdev->size;
-
-       printk("raid0 : md_size is %llu blocks.\n", 
-               (unsigned long long)mddev->array_size);
-       printk("raid0 : conf->hash_spacing is %llu blocks.\n",
-               (unsigned long long)conf->hash_spacing);
-       {
-#if __GNUC__ < 3
-               volatile
-#endif
-               sector_t s = mddev->array_size;
-               sector_t space = conf->hash_spacing;
-               int round;
-               conf->preshift = 0;
-               if (sizeof(sector_t) > sizeof(u32)) {
-                       /*shift down space and s so that sector_div will work */
-                       while (space > (sector_t) (~(u32)0)) {
-                               s >>= 1;
-                               space >>= 1;
-                               s += 1; /* force round-up */
-                               conf->preshift++;
-                       }
-               }
-               round = sector_div(s, (u32)space) ? 1 : 0;
-               nb_zone = s + round;
-       }
-       printk("raid0 : nb_zone is %d.\n", nb_zone);
-
-       printk("raid0 : Allocating %Zd bytes for hash.\n",
-                               nb_zone*sizeof(struct strip_zone*));
-       conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
-       if (!conf->hash_table)
-               goto out_free_conf;
-       size = conf->strip_zone[cur].size;
-
-       for (i=0; i< nb_zone; i++) {
-               conf->hash_table[i] = conf->strip_zone + cur;
-               while (size <= conf->hash_spacing) {
-                       cur++;
-                       size += conf->strip_zone[cur].size;
-               }
-               size -= conf->hash_spacing;
-       }
-       if (conf->preshift) {
-               conf->hash_spacing >>= conf->preshift;
-               /* round hash_spacing up so when we divide by it, we
-                * err on the side of too-low, which is safest
-                */
-               conf->hash_spacing++;
+       return array_sectors;
+}
+
+static int raid0_run(mddev_t *mddev)
+{
+       int ret;
+
+       if (mddev->chunk_sectors == 0) {
+               printk(KERN_ERR "md/raid0: chunk size must be set.\n");
+               return -EINVAL;
        }
+       if (md_check_no_bitmap(mddev))
+               return -EINVAL;
+       blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+       mddev->queue->queue_lock = &mddev->queue->__queue_lock;
+
+       ret = create_strip_zones(mddev);
+       if (ret < 0)
+               return ret;
 
+       /* calculate array device size */
+       md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
+
+       printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
+               (unsigned long long)mddev->array_sectors);
        /* calculate the max read-ahead size.
         * For read-ahead of large files to be effective, we need to
         * readahead at least twice a whole stripe. i.e. number of devices
@@ -361,69 +349,125 @@ static int raid0_run (mddev_t *mddev)
         * chunksize should be used in that case.
         */
        {
-               int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE;
+               int stripe = mddev->raid_disks *
+                       (mddev->chunk_sectors << 9) / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2* stripe;
        }
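
For example (hypothetical numbers), with 4 disks and 64 KiB chunks the stripe works out to 4 * 65536 / 4096 = 64 pages, so ra_pages is raised to at least 128 pages, i.e. 512 KiB of read-ahead, two full stripes.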
 
-
        blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
+       dump_zones(mddev);
+       md_integrity_register(mddev);
        return 0;
+}
 
-out_free_conf:
+static int raid0_stop(mddev_t *mddev)
+{
+       raid0_conf_t *conf = mddev->private;
+
+       blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        mddev->private = NULL;
-out:
-       return 1;
+       return 0;
 }
 
-static int raid0_stop (mddev_t *mddev)
+/* Find the zone which holds a particular offset
+ * Update *sectorp to be an offset in that zone
+ */
+static struct strip_zone *find_zone(struct raid0_private_data *conf,
+                                   sector_t *sectorp)
 {
-       raid0_conf_t *conf = mddev_to_conf(mddev);
+       int i;
+       struct strip_zone *z = conf->strip_zone;
+       sector_t sector = *sectorp;
+
+       for (i = 0; i < conf->nr_strip_zones; i++)
+               if (sector < z[i].zone_end) {
+                       if (i)
+                               *sectorp = sector - z[i-1].zone_end;
+                       return z + i;
+               }
+       BUG();
+}
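
With the hypothetical layout sketched earlier (zone_end values 3000 and 6000), a request at array sector 4500 falls in zone 1 and *sectorp is rewritten to 4500 - 3000 = 1500, the offset inside that zone.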
 
-       blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
-       kfree(conf->hash_table);
-       conf->hash_table = NULL;
-       kfree(conf->strip_zone);
-       conf->strip_zone = NULL;
-       kfree(conf);
-       mddev->private = NULL;
+/*
+ * Remaps the bio to the target device. We separate two flows, a
+ * power-of-2 flow and a general flow, for the sake of performance.
+ */
+static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
+                               sector_t sector, sector_t *sector_offset)
+{
+       unsigned int sect_in_chunk;
+       sector_t chunk;
+       raid0_conf_t *conf = mddev->private;
+       unsigned int chunk_sects = mddev->chunk_sectors;
+
+       if (is_power_of_2(chunk_sects)) {
+               int chunksect_bits = ffz(~chunk_sects);
+               /* find the sector offset inside the chunk */
+               sect_in_chunk  = sector & (chunk_sects - 1);
+               sector >>= chunksect_bits;
+               /* chunk in zone */
+               chunk = *sector_offset;
+               /* quotient is the chunk in real device*/
+               sector_div(chunk, zone->nb_dev << chunksect_bits);
+       } else {
+               sect_in_chunk = sector_div(sector, chunk_sects);
+               chunk = *sector_offset;
+               sector_div(chunk, chunk_sects * zone->nb_dev);
+       }
+       /*
+        * Position the bio over the real device:
+        * real sector = chunk offset in the device + start of the zone
+        *               + position inside the chunk
+        */
+       *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
+       return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
+                            + sector_div(sector, zone->nb_dev)];
+}
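
Tracing the power-of-2 path of map_sector() with the same hypothetical zone-0 layout (3 devices, 128-sector chunks): array sector 1000 lives in chunk 7, which round-robins onto device 1, at device-relative sector 360. A minimal userspace sketch of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int chunk_sects = 128;           /* power of 2: 7 bits */
            unsigned int nb_dev = 3;                  /* devices in zone 0 */
            unsigned long long sector = 1000;         /* bio->bi_sector */
            unsigned long long sector_offset = 1000;  /* zone-relative */

            unsigned int sect_in_chunk = sector & (chunk_sects - 1); /* 104 */
            /* quotient is the chunk index on the target device */
            unsigned long long chunk = sector_offset / (nb_dev * chunk_sects);
            /* chunk number across the array, modulo devices in the zone */
            unsigned int dev = (unsigned int)((sector >> 7) % nb_dev); /* 1 */

            sector_offset = chunk * chunk_sects + sect_in_chunk;     /* 360 */
            printf("device %u, sector %llu (+ dev_start + data_offset)\n",
                   dev, sector_offset);
            return 0;
    }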
 
-       return 0;
+/*
+ * Does the io fit entirely inside a single chunk?
+ */
+static inline int is_io_in_chunk_boundary(mddev_t *mddev,
+                       unsigned int chunk_sects, struct bio *bio)
+{
+       if (likely(is_power_of_2(chunk_sects))) {
+               return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+                                       + (bio->bi_size >> 9));
+       } else {
+               sector_t sector = bio->bi_sector;
+               return chunk_sects >= (sector_div(sector, chunk_sects)
+                                               + (bio->bi_size >> 9));
+       }
 }
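
Continuing the example: with 128-sector chunks, a bio at array sector 1000 sits 104 sectors into its chunk, so it passes this check only if it carries at most 24 sectors; anything larger gets split by raid0_make_request() below.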
 
-static int raid0_make_request (request_queue_t *q, struct bio *bio)
+static int raid0_make_request(struct request_queue *q, struct bio *bio)
 {
        mddev_t *mddev = q->queuedata;
-       unsigned int sect_in_chunk, chunksize_bits,  chunk_size, chunk_sects;
-       raid0_conf_t *conf = mddev_to_conf(mddev);
+       unsigned int chunk_sects;
+       sector_t sector_offset;
        struct strip_zone *zone;
        mdk_rdev_t *tmp_dev;
-       unsigned long chunk;
-       sector_t block, rsect;
+       const int rw = bio_data_dir(bio);
+       int cpu;
 
-       if (unlikely(bio_barrier(bio))) {
-               bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+               md_barrier_request(mddev, bio);
                return 0;
        }
 
-       if (bio_data_dir(bio)==WRITE) {
-               disk_stat_inc(mddev->gendisk, writes);
-               disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
-       } else {
-               disk_stat_inc(mddev->gendisk, reads);
-               disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
-       }
-
-       chunk_size = mddev->chunk_size >> 10;
-       chunk_sects = mddev->chunk_size >> 9;
-       chunksize_bits = ffz(~chunk_size);
-       block = bio->bi_sector >> 1;
-       
+       cpu = part_stat_lock();
+       part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+       part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+                     bio_sectors(bio));
+       part_stat_unlock();
 
-       if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
+       chunk_sects = mddev->chunk_sectors;
+       if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
+               sector_t sector = bio->bi_sector;
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
                if (bio->bi_vcnt != 1 ||
@@ -432,7 +476,12 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
-               bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+               if (likely(is_power_of_2(chunk_sects)))
+                       bp = bio_split(bio, chunk_sects - (sector &
+                                                          (chunk_sects-1)));
+               else
+                       bp = bio_split(bio, chunk_sects -
+                                      sector_div(sector, chunk_sects));
                if (raid0_make_request(q, &bp->bio1))
                        generic_make_request(&bp->bio1);
                if (raid0_make_request(q, &bp->bio2))
@@ -441,39 +490,14 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
                bio_pair_release(bp);
                return 0;
        }
 
-       {
-#if __GNUC__ < 3
-               volatile
-#endif
-               sector_t x = block >> conf->preshift;
-               sector_div(x, (u32)conf->hash_spacing);
-               zone = conf->hash_table[x];
-       }
-       while (block >= (zone->zone_offset + zone->size)) 
-               zone++;
-    
-       sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1);
-
-
-       {
-               sector_t x =  (block - zone->zone_offset) >> chunksize_bits;
-
-               sector_div(x, zone->nb_dev);
-               chunk = x;
-               BUG_ON(x != (sector_t)chunk);
-
-               x = block >> chunksize_bits;
-               tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
-       }
-       rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1)
-               + sect_in_chunk;
+       sector_offset = bio->bi_sector;
+       zone =  find_zone(mddev->private, &sector_offset);
+       tmp_dev = map_sector(mddev, zone, bio->bi_sector,
+                            &sector_offset);
        bio->bi_bdev = tmp_dev->bdev;
-       bio->bi_sector = rsect + tmp_dev->data_offset;
-
+       bio->bi_sector = sector_offset + zone->dev_start +
+               tmp_dev->data_offset;
        /*
         * Let the main block layer submit the IO and resolve recursion:
         */
@@ -481,62 +505,71 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
 
 bad_map:
        printk("raid0_make_request bug: can't convert block across chunks"
-               " or bigger than %dk %llu %d\n", chunk_size, 
+               " or bigger than %dk %llu %d\n", chunk_sects / 2,
                (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
-       bio_io_error(bio, bio->bi_size);
+       bio_io_error(bio);
        return 0;
 }
-                          
-static void raid0_status (struct seq_file *seq, mddev_t *mddev)
+
+static void raid0_status(struct seq_file *seq, mddev_t *mddev)
 {
 #undef MD_DEBUG
 #ifdef MD_DEBUG
        int j, k, h;
        char b[BDEVNAME_SIZE];
-       raid0_conf_t *conf = mddev_to_conf(mddev);
-  
+       raid0_conf_t *conf = mddev->private;
+
+       sector_t zone_size;
+       sector_t zone_start = 0;
        h = 0;
+
        for (j = 0; j < conf->nr_strip_zones; j++) {
                seq_printf(seq, "      z%d", j);
-               if (conf->hash_table[h] == conf->strip_zone+j)
-                       seq_printf("(h%d)", h++);
                seq_printf(seq, "=[");
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
-                       seq_printf (seq, "%s/", bdevname(
-                               conf->strip_zone[j].dev[k]->bdev,b));
-
-               seq_printf (seq, "] zo=%d do=%d s=%d\n",
-                               conf->strip_zone[j].zone_offset,
-                               conf->strip_zone[j].dev_offset,
-                               conf->strip_zone[j].size);
+                       seq_printf(seq, "%s/", bdevname(
+                               conf->devlist[j*mddev->raid_disks + k]
+                                               ->bdev, b));
+
+               zone_size  = conf->strip_zone[j].zone_end - zone_start;
+               seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
+                       (unsigned long long)zone_start>>1,
+                       (unsigned long long)conf->strip_zone[j].dev_start>>1,
+                       (unsigned long long)zone_size>>1);
+               zone_start = conf->strip_zone[j].zone_end;
        }
 #endif
-       seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
+       seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
        return;
 }
 
-static mdk_personality_t raid0_personality=
+static struct mdk_personality raid0_personality=
 {
        .name           = "raid0",
+       .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .stop           = raid0_stop,
        .status         = raid0_status,
+       .size           = raid0_size,
 };
 
 static int __init raid0_init (void)
 {
-       return register_md_personality (RAID0, &raid0_personality);
+       return register_md_personality (&raid0_personality);
 }
 
 static void raid0_exit (void)
 {
-       unregister_md_personality (RAID0);
+       unregister_md_personality (&raid0_personality);
 }
 
 module_init(raid0_init);
 module_exit(raid0_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
 MODULE_ALIAS("md-personality-2"); /* RAID0 */
+MODULE_ALIAS("md-raid0");
+MODULE_ALIAS("md-level-0");