d_path: Make seq_path() use a struct path argument
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a9852db..7da6ec2 100644
@@ -33,6 +33,7 @@
 */
 
 #include <linux/module.h>
+#include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/linkage.h>
 #include <linux/raid/md.h>
@@ -194,7 +195,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * Any code which breaks out of this loop while owning
  * a reference to the current mddev must mddev_put it.
  */
-#define ITERATE_MDDEV(mddev,tmp)                                       \
+#define for_each_mddev(mddev,tmp)                                      \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                tmp = all_mddevs.next;                                  \
@@ -210,9 +211,9 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
                )
 
 
-static int md_fail_request (request_queue_t *q, struct bio *bio)
+static int md_fail_request (struct request_queue *q, struct bio *bio)
 {
-       bio_io_error(bio, bio->bi_size);
+       bio_io_error(bio);
        return 0;
 }
 
@@ -230,7 +231,7 @@ static void mddev_put(mddev_t *mddev)
                list_del(&mddev->all_mddevs);
                spin_unlock(&all_mddevs_lock);
                blk_cleanup_queue(mddev->queue);
-               kobject_unregister(&mddev->kobj);
+               kobject_put(&mddev->kobj);
        } else
                spin_unlock(&all_mddevs_lock);
 }
@@ -273,6 +274,8 @@ static mddev_t * mddev_find(dev_t unit)
        atomic_set(&new->active, 1);
        spin_lock_init(&new->write_lock);
        init_waitqueue_head(&new->sb_wait);
+       new->reshape_position = MaxSector;
+       new->resync_max = MaxSector;
 
        new->queue = blk_alloc_queue(GFP_KERNEL);
        if (!new->queue) {
@@ -308,7 +311,7 @@ static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
        mdk_rdev_t * rdev;
        struct list_head *tmp;
 
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       rdev_for_each(rdev, tmp, mddev) {
                if (rdev->desc_nr == nr)
                        return rdev;
        }
@@ -320,7 +323,7 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
        struct list_head *tmp;
        mdk_rdev_t *rdev;
 
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       rdev_for_each(rdev, tmp, mddev) {
                if (rdev->bdev->bd_dev == dev)
                        return rdev;
        }
@@ -382,12 +385,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
 }
 
 
-static int super_written(struct bio *bio, unsigned int bytes_done, int error)
+static void super_written(struct bio *bio, int error)
 {
        mdk_rdev_t *rdev = bio->bi_private;
        mddev_t *mddev = rdev->mddev;
-       if (bio->bi_size)
-               return 1;
 
        if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                printk("md: super_written gets error=%d, uptodate=%d\n",
@@ -399,16 +400,13 @@ static int super_written(struct bio *bio, unsigned int bytes_done, int error)
        if (atomic_dec_and_test(&mddev->pending_writes))
                wake_up(&mddev->sb_wait);
        bio_put(bio);
-       return 0;
 }
 
-static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
+static void super_written_barrier(struct bio *bio, int error)
 {
        struct bio *bio2 = bio->bi_private;
        mdk_rdev_t *rdev = bio2->bi_private;
        mddev_t *mddev = rdev->mddev;
-       if (bio->bi_size)
-               return 1;
 
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
            error == -EOPNOTSUPP) {
@@ -422,11 +420,11 @@ static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int e
                spin_unlock_irqrestore(&mddev->write_lock, flags);
                wake_up(&mddev->sb_wait);
                bio_put(bio);
-               return 0;
+       } else {
+               bio_put(bio2);
+               bio->bi_private = rdev;
+               super_written(bio, error);
        }
-       bio_put(bio2);
-       bio->bi_private = rdev;
-       return super_written(bio, bytes_done, error);
 }
 
 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
@@ -487,13 +485,9 @@ void md_super_wait(mddev_t *mddev)
        finish_wait(&mddev->sb_wait, &wq);
 }
 
-static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
+static void bi_complete(struct bio *bio, int error)
 {
-       if (bio->bi_size)
-               return 1;
-
        complete((struct completion*)bio->bi_private);
-       return 0;
 }
 
 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
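The three completion handlers above (super_written, super_written_barrier and bi_complete) are converted to the newer bi_end_io convention: the callback now returns void and runs exactly once per bio, so the bytes_done argument and the `if (bio->bi_size) return 1;` partial-completion check go away, and bio_io_error() likewise loses its size argument. A minimal user-space sketch of the two callback shapes, with struct bio stubbed out for illustration only (not kernel code):

```c
#include <stdio.h>

/* Stand-in for the kernel type, for illustration only. */
struct bio {
        unsigned int bi_size;   /* bytes still outstanding (old model) */
        void *bi_private;
};

/* Old convention: could be called several times per bio; returning 1
 * while bi_size was non-zero meant "partial completion, call me again". */
static int old_end_io(struct bio *bio, unsigned int bytes_done, int error)
{
        if (bio->bi_size)
                return 1;
        printf("old style: done (last chunk %u bytes), error=%d\n",
               bytes_done, error);
        return 0;
}

/* New convention: called exactly once, when the whole bio has finished. */
static void new_end_io(struct bio *bio, int error)
{
        printf("new style: done, error=%d (private=%p)\n", error, bio->bi_private);
}

int main(void)
{
        struct bio b = { .bi_size = 0, .bi_private = 0 };

        old_end_io(&b, 4096, 0);
        new_end_io(&b, 0);
        return 0;
}
```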
@@ -589,14 +583,41 @@ abort:
        return ret;
 }
 
+
+static u32 md_csum_fold(u32 csum)
+{
+       csum = (csum & 0xffff) + (csum >> 16);
+       return (csum & 0xffff) + (csum >> 16);
+}
+
 static unsigned int calc_sb_csum(mdp_super_t * sb)
 {
+       u64 newcsum = 0;
+       u32 *sb32 = (u32*)sb;
+       int i;
        unsigned int disk_csum, csum;
 
        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
-       csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
+
+       for (i = 0; i < MD_SB_BYTES/4 ; i++)
+               newcsum += sb32[i];
+       csum = (newcsum & 0xffffffff) + (newcsum>>32);
+
+
+#ifdef CONFIG_ALPHA
+       /* This used to use csum_partial, which was wrong for several
+        * reasons including that different results are returned on
+        * different architectures.  It isn't critical that we get exactly
+        * the same return value as before (we always csum_fold before
+        * testing, and that removes any differences).  However as we
+        * know that csum_partial always returned a 16bit value on
+        * alphas, do a fold to maximise conformity to previous behaviour.
+        */
+       sb->sb_csum = md_csum_fold(disk_csum);
+#else
        sb->sb_csum = disk_csum;
+#endif
        return csum;
 }
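The md_csum_fold() helper added above reduces a 32-bit sum to 16 bits by adding the high and low halves and then folding the possible carry back in, the same end-around-carry behaviour the old csum_fold() gave, so folded superblock checksums still compare equal across kernels (with the Alpha special case preserving the old stored value). A quick stand-alone check of the fold, assuming a plain user-space harness rather than the kernel build:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as md_csum_fold(): fold a 32-bit sum down to 16 bits
 * with an end-around carry. */
static uint32_t fold16(uint32_t csum)
{
        csum = (csum & 0xffff) + (csum >> 16);
        return (csum & 0xffff) + (csum >> 16);
}

int main(void)
{
        assert(fold16(0x00000000) == 0x0000);
        assert(fold16(0x0001ffff) == 0x0001);   /* carry wraps around */
        assert(fold16(0xffffffff) == 0xffff);
        printf("fold(0x12345678) = 0x%04x\n", fold16(0x12345678));
        return 0;
}
```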
 
@@ -684,7 +705,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        if (sb->raid_disks <= 0)
                goto abort;
 
-       if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
+       if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
                printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
                        b);
                goto abort;
@@ -694,6 +715,17 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        rdev->data_offset = 0;
        rdev->sb_size = MD_SB_BYTES;
 
+       if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
+               if (sb->level != 1 && sb->level != 4
+                   && sb->level != 5 && sb->level != 6
+                   && sb->level != 10) {
+                       /* FIXME use a better test */
+                       printk(KERN_WARNING
+                              "md: bitmaps not supported for this level.\n");
+                       goto abort;
+               }
+       }
+
        if (sb->level == LEVEL_MULTIPATH)
                rdev->desc_nr = -1;
        else
@@ -742,12 +774,16 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
        __u64 ev1 = md_event(sb);
 
        rdev->raid_disk = -1;
-       rdev->flags = 0;
+       clear_bit(Faulty, &rdev->flags);
+       clear_bit(In_sync, &rdev->flags);
+       clear_bit(WriteMostly, &rdev->flags);
+       clear_bit(BarriersNotsupp, &rdev->flags);
+
        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
                mddev->patch_version = sb->patch_version;
-               mddev->persistent = ! sb->not_persistent;
+               mddev->external = 0;
                mddev->chunk_size = sb->chunk_size;
                mddev->ctime = sb->ctime;
                mddev->utime = sb->utime;
@@ -792,16 +828,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->max_disks = MD_SB_DISKS;
 
                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
-                   mddev->bitmap_file == NULL) {
-                       if (mddev->level != 1 && mddev->level != 4
-                           && mddev->level != 5 && mddev->level != 6
-                           && mddev->level != 10) {
-                               /* FIXME use a better test */
-                               printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
-                               return -EINVAL;
-                       }
+                   mddev->bitmap_file == NULL)
                        mddev->bitmap_offset = mddev->default_bitmap_offset;
-               }
 
        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
@@ -881,7 +909,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->size  = mddev->size;
        sb->raid_disks = mddev->raid_disks;
        sb->md_minor = mddev->md_minor;
-       sb->not_persistent = !mddev->persistent;
+       sb->not_persistent = 0;
        sb->utime = mddev->utime;
        sb->state = 0;
        sb->events_hi = (mddev->events>>32);
@@ -915,7 +943,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);
 
        sb->disks[0].state = (1<<MD_DISK_REMOVED);
-       ITERATE_RDEV(mddev,rdev2,tmp) {
+       rdev_for_each(rdev2, tmp, mddev) {
                mdp_disk_t *d;
                int desc_nr;
                if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
@@ -1058,6 +1086,18 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
                       bdevname(rdev->bdev,b));
                return -EINVAL;
        }
+       if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
+               if (sb->level != cpu_to_le32(1) &&
+                   sb->level != cpu_to_le32(4) &&
+                   sb->level != cpu_to_le32(5) &&
+                   sb->level != cpu_to_le32(6) &&
+                   sb->level != cpu_to_le32(10)) {
+                       printk(KERN_WARNING
+                              "md: bitmaps not supported for this level.\n");
+                       return -EINVAL;
+               }
+       }
+
        rdev->preferred_minor = 0xffff;
        rdev->data_offset = le64_to_cpu(sb->data_offset);
        atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
@@ -1118,11 +1158,15 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
        __u64 ev1 = le64_to_cpu(sb->events);
 
        rdev->raid_disk = -1;
-       rdev->flags = 0;
+       clear_bit(Faulty, &rdev->flags);
+       clear_bit(In_sync, &rdev->flags);
+       clear_bit(WriteMostly, &rdev->flags);
+       clear_bit(BarriersNotsupp, &rdev->flags);
+
        if (mddev->raid_disks == 0) {
                mddev->major_version = 1;
                mddev->patch_version = 0;
-               mddev->persistent = 1;
+               mddev->external = 0;
                mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
                mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
                mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
@@ -1141,14 +1185,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->max_disks =  (4096-256)/2;
 
                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
-                   mddev->bitmap_file == NULL ) {
-                       if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
-                           && mddev->level != 10) {
-                               printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
-                               return -EINVAL;
-                       }
+                   mddev->bitmap_file == NULL )
                        mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
-               }
+
                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
                        mddev->reshape_position = le64_to_cpu(sb->reshape_position);
                        mddev->delta_disks = le32_to_cpu(sb->delta_disks);
@@ -1256,15 +1295,16 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        }
 
        max_dev = 0;
-       ITERATE_RDEV(mddev,rdev2,tmp)
+       rdev_for_each(rdev2, tmp, mddev)
                if (rdev2->desc_nr+1 > max_dev)
                        max_dev = rdev2->desc_nr+1;
-       
-       sb->max_dev = cpu_to_le32(max_dev);
+
+       if (max_dev > le32_to_cpu(sb->max_dev))
+               sb->max_dev = cpu_to_le32(max_dev);
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);
        
-       ITERATE_RDEV(mddev,rdev2,tmp) {
+       rdev_for_each(rdev2, tmp, mddev) {
                i = rdev2->desc_nr;
                if (test_bit(Faulty, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -1302,8 +1342,8 @@ static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
        struct list_head *tmp, *tmp2;
        mdk_rdev_t *rdev, *rdev2;
 
-       ITERATE_RDEV(mddev1,rdev,tmp)
-               ITERATE_RDEV(mddev2, rdev2, tmp2)
+       rdev_for_each(rdev, tmp, mddev1)
+               rdev_for_each(rdev2, tmp2, mddev2)
                        if (rdev->bdev->bd_contains ==
                            rdev2->bdev->bd_contains)
                                return 1;
@@ -1318,6 +1358,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
        char b[BDEVNAME_SIZE];
        struct kobject *ko;
        char *s;
+       int err;
 
        if (rdev->mddev) {
                MD_BUG();
@@ -1325,10 +1366,14 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
        }
        /* make sure rdev->size exceeds mddev->size */
        if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
-               if (mddev->pers)
-                       /* Cannot change size, so fail */
-                       return -ENOSPC;
-               else
+               if (mddev->pers) {
+                       /* Cannot change size, so fail
+                        * If mddev->level <= 0, then we don't care
+                        * about aligning sizes (e.g. linear)
+                        */
+                       if (mddev->level > 0)
+                               return -ENOSPC;
+               } else
                        mddev->size = rdev->size;
        }
 
@@ -1347,25 +1392,38 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
                        return -EBUSY;
        }
        bdevname(rdev->bdev,b);
-       if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
-               return -ENOMEM;
-       while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
+       while ( (s=strchr(b, '/')) != NULL)
                *s = '!';
-                       
-       list_add(&rdev->same_set, &mddev->disks);
+
        rdev->mddev = mddev;
        printk(KERN_INFO "md: bind<%s>\n", b);
 
-       rdev->kobj.parent = &mddev->kobj;
-       kobject_add(&rdev->kobj);
+       if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
+               goto fail;
 
        if (rdev->bdev->bd_part)
-               ko = &rdev->bdev->bd_part->kobj;
+               ko = &rdev->bdev->bd_part->dev.kobj;
        else
-               ko = &rdev->bdev->bd_disk->kobj;
-       sysfs_create_link(&rdev->kobj, ko, "block");
-       bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
+               ko = &rdev->bdev->bd_disk->dev.kobj;
+       if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
+               kobject_del(&rdev->kobj);
+               goto fail;
+       }
+       list_add(&rdev->same_set, &mddev->disks);
+       bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
        return 0;
+
+ fail:
+       printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
+              b, mdname(mddev));
+       return err;
+}
+
+static void md_delayed_delete(struct work_struct *ws)
+{
+       mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
+       kobject_del(&rdev->kobj);
+       kobject_put(&rdev->kobj);
 }
 
 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
@@ -1380,7 +1438,13 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
        printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
        rdev->mddev = NULL;
        sysfs_remove_link(&rdev->kobj, "block");
-       kobject_del(&rdev->kobj);
+
+       /* We need to delay this, otherwise we can deadlock when
+        * writing 'remove' to "dev/state"
+        */
+       INIT_WORK(&rdev->del_work, md_delayed_delete);
+       kobject_get(&rdev->kobj);
+       schedule_work(&rdev->del_work);
 }
 
 /*
@@ -1388,7 +1452,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
  * otherwise reused by a RAID array (or any other kernel
  * subsystem), by bd_claiming the device.
  */
-static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
+static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
 {
        int err = 0;
        struct block_device *bdev;
@@ -1400,13 +1464,15 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
                        __bdevname(dev, b));
                return PTR_ERR(bdev);
        }
-       err = bd_claim(bdev, rdev);
+       err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
        if (err) {
                printk(KERN_ERR "md: could not bd_claim %s.\n",
                        bdevname(bdev, b));
                blkdev_put(bdev);
                return err;
        }
+       if (!shared)
+               set_bit(AllReserved, &rdev->flags);
        rdev->bdev = bdev;
        return err;
 }
@@ -1450,7 +1516,7 @@ static void export_array(mddev_t *mddev)
        struct list_head *tmp;
        mdk_rdev_t *rdev;
 
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       rdev_for_each(rdev, tmp, mddev) {
                if (!rdev->mddev) {
                        MD_BUG();
                        continue;
@@ -1528,17 +1594,17 @@ static void md_print_devices(void)
        printk("md:     **********************************\n");
        printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
        printk("md:     **********************************\n");
-       ITERATE_MDDEV(mddev,tmp) {
+       for_each_mddev(mddev, tmp) {
 
                if (mddev->bitmap)
                        bitmap_print_sb(mddev->bitmap);
                else
                        printk("%s: ", mdname(mddev));
-               ITERATE_RDEV(mddev,rdev,tmp2)
+               rdev_for_each(rdev, tmp2, mddev)
                        printk("<%s>", bdevname(rdev->bdev,b));
                printk("\n");
 
-               ITERATE_RDEV(mddev,rdev,tmp2)
+               rdev_for_each(rdev, tmp2, mddev)
                        print_rdev(rdev);
        }
        printk("md:     **********************************\n");
@@ -1557,7 +1623,7 @@ static void sync_sbs(mddev_t * mddev, int nospares)
        mdk_rdev_t *rdev;
        struct list_head *tmp;
 
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       rdev_for_each(rdev, tmp, mddev) {
                if (rdev->sb_events == mddev->events ||
                    (nospares &&
                     rdev->raid_disk < 0 &&
@@ -1575,7 +1641,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
 
 static void md_update_sb(mddev_t * mddev, int force_change)
 {
-       int err;
        struct list_head *tmp;
        mdk_rdev_t *rdev;
        int sync_req;
@@ -1644,26 +1709,28 @@ repeat:
                MD_BUG();
                mddev->events --;
        }
-       sync_sbs(mddev, nospares);
 
        /*
         * do not write anything to disk if using
         * nonpersistent superblocks
         */
        if (!mddev->persistent) {
-               clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+               if (!mddev->external)
+                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+
                spin_unlock_irq(&mddev->write_lock);
                wake_up(&mddev->sb_wait);
                return;
        }
+       sync_sbs(mddev, nospares);
        spin_unlock_irq(&mddev->write_lock);
 
        dprintk(KERN_INFO 
                "md: updating %s RAID superblock on device (in sync %d)\n",
                mdname(mddev),mddev->in_sync);
 
-       err = bitmap_update_sb(mddev->bitmap);
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       bitmap_update_sb(mddev->bitmap);
+       rdev_for_each(rdev, tmp, mddev) {
                char b[BDEVNAME_SIZE];
                dprintk(KERN_INFO "md: ");
                if (rdev->sb_loaded != 1)
@@ -1733,7 +1800,7 @@ static ssize_t
 state_show(mdk_rdev_t *rdev, char *page)
 {
        char *sep = "";
-       int len=0;
+       size_t len = 0;
 
        if (test_bit(Faulty, &rdev->flags)) {
                len+= sprintf(page+len, "%sfaulty",sep);
@@ -1835,20 +1902,45 @@ static ssize_t
 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 {
        char *e;
+       int err;
+       char nm[20];
        int slot = simple_strtoul(buf, &e, 10);
        if (strncmp(buf, "none", 4)==0)
                slot = -1;
        else if (e==buf || (*e && *e!= '\n'))
                return -EINVAL;
-       if (rdev->mddev->pers)
-               /* Cannot set slot in active array (yet) */
-               return -EBUSY;
-       if (slot >= rdev->mddev->raid_disks)
-               return -ENOSPC;
-       rdev->raid_disk = slot;
-       /* assume it is working */
-       rdev->flags = 0;
-       set_bit(In_sync, &rdev->flags);
+       if (rdev->mddev->pers) {
+               /* Setting 'slot' on an active array requires also
+                * updating the 'rd%d' link, and communicating
+                * with the personality with ->hot_*_disk.
+                * For now we only support removing
+                * failed/spare devices.  This normally happens automatically,
+                * but not when the metadata is externally managed.
+                */
+               if (slot != -1)
+                       return -EBUSY;
+               if (rdev->raid_disk == -1)
+                       return -EEXIST;
+               /* personality does all needed checks */
+               if (rdev->mddev->pers->hot_add_disk == NULL)
+                       return -EINVAL;
+               err = rdev->mddev->pers->
+                       hot_remove_disk(rdev->mddev, rdev->raid_disk);
+               if (err)
+                       return err;
+               sprintf(nm, "rd%d", rdev->raid_disk);
+               sysfs_remove_link(&rdev->mddev->kobj, nm);
+               set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
+               md_wakeup_thread(rdev->mddev->thread);
+       } else {
+               if (slot >= rdev->mddev->raid_disks)
+                       return -ENOSPC;
+               rdev->raid_disk = slot;
+               /* assume it is working */
+               clear_bit(Faulty, &rdev->flags);
+               clear_bit(WriteMostly, &rdev->flags);
+               set_bit(In_sync, &rdev->flags);
+       }
        return len;
 }
 
@@ -1871,6 +1963,10 @@ offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                return -EINVAL;
        if (rdev->mddev->pers)
                return -EBUSY;
+       if (rdev->size && rdev->mddev->external)
+               /* Must set offset before size, so overlap checks
+                * can be sane */
+               return -EBUSY;
        rdev->data_offset = offset;
        return len;
 }
@@ -1884,16 +1980,69 @@ rdev_size_show(mdk_rdev_t *rdev, char *page)
        return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
 }
 
+static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
+{
+       /* check if two start/length pairs overlap */
+       if (s1+l1 <= s2)
+               return 0;
+       if (s2+l2 <= s1)
+               return 0;
+       return 1;
+}
+
 static ssize_t
 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 {
        char *e;
        unsigned long long size = simple_strtoull(buf, &e, 10);
+       unsigned long long oldsize = rdev->size;
        if (e==buf || (*e && *e != '\n'))
                return -EINVAL;
        if (rdev->mddev->pers)
                return -EBUSY;
        rdev->size = size;
+       if (size > oldsize && rdev->mddev->external) {
+               /* need to check that all other rdevs with the same ->bdev
+                * do not overlap.  We need to unlock the mddev to avoid
+                * a deadlock.  We have already changed rdev->size, and if
+                * we have to change it back, we will have the lock again.
+                */
+               mddev_t *mddev;
+               int overlap = 0;
+               struct list_head *tmp, *tmp2;
+
+               mddev_unlock(rdev->mddev);
+               for_each_mddev(mddev, tmp) {
+                       mdk_rdev_t *rdev2;
+
+                       mddev_lock(mddev);
+                       rdev_for_each(rdev2, tmp2, mddev)
+                               if (test_bit(AllReserved, &rdev2->flags) ||
+                                   (rdev->bdev == rdev2->bdev &&
+                                    rdev != rdev2 &&
+                                    overlaps(rdev->data_offset, rdev->size,
+                                           rdev2->data_offset, rdev2->size))) {
+                                       overlap = 1;
+                                       break;
+                               }
+                       mddev_unlock(mddev);
+                       if (overlap) {
+                               mddev_put(mddev);
+                               break;
+                       }
+               }
+               mddev_lock(rdev->mddev);
+               if (overlap) {
+                       /* Someone else could have slipped in a size
+                        * change here, but doing so is just silly.
+                        * We put oldsize back because we *know* it is
+                        * safe, and trust userspace not to race with
+                        * itself
+                        */
+                       rdev->size = oldsize;
+                       return -EBUSY;
+               }
+       }
        if (size < rdev->mddev->size || rdev->mddev->size == 0)
                rdev->mddev->size = size;
        return len;
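The overlaps() helper added in this hunk treats each rdev as a half-open [start, start+length) range on its block device, so two ranges that merely meet end-to-start are not reported as overlapping. A quick user-space sanity check of that predicate, with sector_t replaced by a 64-bit integer for illustration:

```c
#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;      /* stand-in for the kernel type */

/* Two half-open [start, start+len) ranges overlap unless one ends at or
 * before the point where the other begins. */
static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
        if (s1 + l1 <= s2)
                return 0;
        if (s2 + l2 <= s1)
                return 0;
        return 1;
}

int main(void)
{
        assert(!overlaps(0, 100, 100, 50));     /* back to back: no overlap */
        assert( overlaps(0, 101, 100, 50));     /* one sector shared */
        assert( overlaps(50, 10, 0, 1000));     /* fully contained */
        assert(!overlaps(200, 10, 0, 100));     /* disjoint */
        return 0;
}
```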
@@ -1928,12 +2077,18 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
 {
        struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
        mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
+       int rv;
 
        if (!entry->store)
                return -EIO;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
-       return entry->store(rdev, page, length);
+       rv = mddev_lock(rdev->mddev);
+       if (!rv) {
+               rv = entry->store(rdev, page, length);
+               mddev_unlock(rdev->mddev);
+       }
+       return rv;
 }
 
 static void rdev_free(struct kobject *ko)
@@ -1977,13 +2132,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
        if ((err = alloc_disk_sb(rdev)))
                goto abort_free;
 
-       err = lock_rdev(rdev, newdev);
+       err = lock_rdev(rdev, newdev, super_format == -2);
        if (err)
                goto abort_free;
 
-       rdev->kobj.parent = NULL;
-       rdev->kobj.ktype = &rdev_ktype;
-       kobject_init(&rdev->kobj);
+       kobject_init(&rdev->kobj, &rdev_ktype);
 
        rdev->desc_nr = -1;
        rdev->saved_raid_disk = -1;
@@ -2008,9 +2161,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
                err = super_types[super_format].
                        load_super(rdev, NULL, super_minor);
                if (err == -EINVAL) {
-                       printk(KERN_WARNING 
-                               "md: %s has invalid sb, not importing!\n",
-                               bdevname(rdev->bdev,b));
+                       printk(KERN_WARNING
+                               "md: %s does not have a valid v%d.%d "
+                              "superblock, not importing!\n",
+                               bdevname(rdev->bdev,b),
+                              super_format, super_minor);
                        goto abort_free;
                }
                if (err < 0) {
@@ -2047,7 +2202,7 @@ static void analyze_sbs(mddev_t * mddev)
        char b[BDEVNAME_SIZE];
 
        freshest = NULL;
-       ITERATE_RDEV(mddev,rdev,tmp)
+       rdev_for_each(rdev, tmp, mddev)
                switch (super_types[mddev->major_version].
                        load_super(rdev, freshest, mddev->minor_version)) {
                case 1:
@@ -2068,7 +2223,7 @@ static void analyze_sbs(mddev_t * mddev)
                validate_super(mddev, freshest);
 
        i = 0;
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       rdev_for_each(rdev, tmp, mddev) {
                if (rdev != freshest)
                        if (super_types[mddev->major_version].
                            validate_super(mddev, rdev)) {
@@ -2082,6 +2237,9 @@ static void analyze_sbs(mddev_t * mddev)
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
                        set_bit(In_sync, &rdev->flags);
+               } else if (rdev->raid_disk >= mddev->raid_disks) {
+                       rdev->raid_disk = -1;
+                       clear_bit(In_sync, &rdev->flags);
                }
        }
 
@@ -2160,7 +2318,7 @@ level_show(mddev_t *mddev, char *page)
 static ssize_t
 level_store(mddev_t *mddev, const char *buf, size_t len)
 {
-       int rv = len;
+       ssize_t rv = len;
        if (mddev->pers)
                return -EBUSY;
        if (len == 0)
@@ -2183,6 +2341,10 @@ static ssize_t
 layout_show(mddev_t *mddev, char *page)
 {
        /* just a number, not meaningful for all levels */
+       if (mddev->reshape_position != MaxSector &&
+           mddev->layout != mddev->new_layout)
+               return sprintf(page, "%d (%d)\n",
+                              mddev->new_layout, mddev->layout);
        return sprintf(page, "%d\n", mddev->layout);
 }
 
@@ -2191,13 +2353,16 @@ layout_store(mddev_t *mddev, const char *buf, size_t len)
 {
        char *e;
        unsigned long n = simple_strtoul(buf, &e, 10);
-       if (mddev->pers)
-               return -EBUSY;
 
        if (!*buf || (*e && *e != '\n'))
                return -EINVAL;
 
-       mddev->layout = n;
+       if (mddev->pers)
+               return -EBUSY;
+       if (mddev->reshape_position != MaxSector)
+               mddev->new_layout = n;
+       else
+               mddev->layout = n;
        return len;
 }
 static struct md_sysfs_entry md_layout =
@@ -2209,6 +2374,10 @@ raid_disks_show(mddev_t *mddev, char *page)
 {
        if (mddev->raid_disks == 0)
                return 0;
+       if (mddev->reshape_position != MaxSector &&
+           mddev->delta_disks != 0)
+               return sprintf(page, "%d (%d)\n", mddev->raid_disks,
+                              mddev->raid_disks - mddev->delta_disks);
        return sprintf(page, "%d\n", mddev->raid_disks);
 }
 
@@ -2226,7 +2395,11 @@ raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
 
        if (mddev->pers)
                rv = update_raid_disks(mddev, n);
-       else
+       else if (mddev->reshape_position != MaxSector) {
+               int olddisks = mddev->raid_disks - mddev->delta_disks;
+               mddev->delta_disks = n - olddisks;
+               mddev->raid_disks = n;
+       } else
                mddev->raid_disks = n;
        return rv ? rv : len;
 }
@@ -2236,6 +2409,10 @@ __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
 static ssize_t
 chunk_size_show(mddev_t *mddev, char *page)
 {
+       if (mddev->reshape_position != MaxSector &&
+           mddev->chunk_size != mddev->new_chunk)
+               return sprintf(page, "%d (%d)\n", mddev->new_chunk,
+                              mddev->chunk_size);
        return sprintf(page, "%d\n", mddev->chunk_size);
 }
 
@@ -2246,12 +2423,15 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
        char *e;
        unsigned long n = simple_strtoul(buf, &e, 10);
 
-       if (mddev->pers)
-               return -EBUSY;
        if (!*buf || (*e && *e != '\n'))
                return -EINVAL;
 
-       mddev->chunk_size = n;
+       if (mddev->pers)
+               return -EBUSY;
+       else if (mddev->reshape_position != MaxSector)
+               mddev->new_chunk = n;
+       else
+               mddev->chunk_size = n;
        return len;
 }
 static struct md_sysfs_entry md_chunk_size =
@@ -2348,6 +2528,8 @@ array_state_show(mddev_t *mddev, char *page)
                case 0:
                        if (mddev->in_sync)
                                st = clean;
+                       else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+                               st = write_pending;
                        else if (mddev->safemode)
                                st = active_idle;
                        else
@@ -2378,11 +2560,9 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                break;
        case clear:
                /* stopping an active array */
-               if (mddev->pers) {
-                       if (atomic_read(&mddev->active) > 1)
-                               return -EBUSY;
-                       err = do_md_stop(mddev, 0);
-               }
+               if (atomic_read(&mddev->active) > 1)
+                       return -EBUSY;
+               err = do_md_stop(mddev, 0);
                break;
        case inactive:
                /* stopping an active array */
@@ -2390,7 +2570,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                        if (atomic_read(&mddev->active) > 1)
                                return -EBUSY;
                        err = do_md_stop(mddev, 2);
-               }
+               } else
+                       err = 0; /* already inactive */
                break;
        case suspended:
                break; /* not supported yet */
@@ -2418,9 +2599,15 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                        restart_array(mddev);
                        spin_lock_irq(&mddev->write_lock);
                        if (atomic_read(&mddev->writes_pending) == 0) {
-                               mddev->in_sync = 1;
-                               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
-                       }
+                               if (mddev->in_sync == 0) {
+                                       mddev->in_sync = 1;
+                                       if (mddev->persistent)
+                                               set_bit(MD_CHANGE_CLEAN,
+                                                       &mddev->flags);
+                               }
+                               err = 0;
+                       } else
+                               err = -EBUSY;
                        spin_unlock_irq(&mddev->write_lock);
                } else {
                        mddev->ro = 0;
@@ -2431,7 +2618,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
        case active:
                if (mddev->pers) {
                        restart_array(mddev);
-                       clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       if (mddev->external)
+                               clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
                        wake_up(&mddev->sb_wait);
                        err = 0;
                } else {
@@ -2497,7 +2685,9 @@ new_dev_store(mddev_t *mddev, const char *buf, size_t len)
                        if (err < 0)
                                goto out;
                }
-       } else
+       } else if (mddev->external)
+               rdev = md_import_device(dev, -2, -1);
+       else
                rdev = md_import_device(dev, -1, -1);
 
        if (IS_ERR(rdev))
@@ -2582,7 +2772,9 @@ __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
 
 
 /* Metadata version.
- * This is either 'none' for arrays with externally managed metadata,
+ * This is one of
+ *   'none' for arrays with no metadata (good luck...)
+ *   'external' for arrays with externally managed metadata,
  * or N.M for internally known formats
  */
 static ssize_t
@@ -2591,6 +2783,8 @@ metadata_show(mddev_t *mddev, char *page)
        if (mddev->persistent)
                return sprintf(page, "%d.%d\n",
                               mddev->major_version, mddev->minor_version);
+       else if (mddev->external)
+               return sprintf(page, "external:%s\n", mddev->metadata_type);
        else
                return sprintf(page, "none\n");
 }
@@ -2605,6 +2799,21 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
 
        if (cmd_match(buf, "none")) {
                mddev->persistent = 0;
+               mddev->external = 0;
+               mddev->major_version = 0;
+               mddev->minor_version = 90;
+               return len;
+       }
+       if (strncmp(buf, "external:", 9) == 0) {
+               size_t namelen = len-9;
+               if (namelen >= sizeof(mddev->metadata_type))
+                       namelen = sizeof(mddev->metadata_type)-1;
+               strncpy(mddev->metadata_type, buf+9, namelen);
+               mddev->metadata_type[namelen] = 0;
+               if (namelen && mddev->metadata_type[namelen-1] == '\n')
+                       mddev->metadata_type[--namelen] = 0;
+               mddev->persistent = 0;
+               mddev->external = 1;
                mddev->major_version = 0;
                mddev->minor_version = 90;
                return len;
@@ -2616,12 +2825,12 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
        minor = simple_strtoul(buf, &e, 10);
        if (e==buf || (*e && *e != '\n') )
                return -EINVAL;
-       if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
-           super_types[major].name == NULL)
+       if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
                return -ENOENT;
        mddev->major_version = major;
        mddev->minor_version = minor;
        mddev->persistent = 1;
+       mddev->external = 0;
        return len;
 }
 
@@ -2633,7 +2842,7 @@ action_show(mddev_t *mddev, char *page)
 {
        char *type = "idle";
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
-           test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
+           (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
                if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                        type = "reshape";
                else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -2752,6 +2961,12 @@ sync_max_store(mddev_t *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry md_sync_max =
 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
 
+static ssize_t
+degraded_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%d\n", mddev->degraded);
+}
+static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
 
 static ssize_t
 sync_speed_show(mddev_t *mddev, char *page)
@@ -2783,6 +2998,43 @@ sync_completed_show(mddev_t *mddev, char *page)
 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
 
 static ssize_t
+max_sync_show(mddev_t *mddev, char *page)
+{
+       if (mddev->resync_max == MaxSector)
+               return sprintf(page, "max\n");
+       else
+               return sprintf(page, "%llu\n",
+                              (unsigned long long)mddev->resync_max);
+}
+static ssize_t
+max_sync_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       if (strncmp(buf, "max", 3) == 0)
+               mddev->resync_max = MaxSector;
+       else {
+               char *ep;
+               unsigned long long max = simple_strtoull(buf, &ep, 10);
+               if (ep == buf || (*ep != 0 && *ep != '\n'))
+                       return -EINVAL;
+               if (max < mddev->resync_max &&
+                   test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+                       return -EBUSY;
+
+               /* Must be a multiple of chunk_size */
+               if (mddev->chunk_size) {
+                       if (max & (sector_t)((mddev->chunk_size>>9)-1))
+                               return -EINVAL;
+               }
+               mddev->resync_max = max;
+       }
+       wake_up(&mddev->recovery_wait);
+       return len;
+}
+
+static struct md_sysfs_entry md_max_sync =
+__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
+
+static ssize_t
 suspend_lo_show(mddev_t *mddev, char *page)
 {
        return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
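The sync_max attribute added a few lines above (max_sync_store) only accepts values aligned to a whole chunk; since md chunk sizes are powers of two at this point, the store routine can test alignment with a simple mask over the chunk size expressed in 512-byte sectors. A small stand-alone illustration of that test (the aligned_to_chunk helper name is made up, not from the patch):

```c
#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;      /* stand-in for the kernel type */

/* chunk_size is in bytes and (in md at this point) a power of two;
 * resync_max is in 512-byte sectors.  The value is chunk-aligned iff
 * none of the low bits of the per-chunk sector count are set. */
static int aligned_to_chunk(sector_t max, int chunk_size)
{
        return (max & (sector_t)((chunk_size >> 9) - 1)) == 0;
}

int main(void)
{
        int chunk = 64 * 1024;                  /* 64 KiB = 128 sectors */

        assert(aligned_to_chunk(0, chunk));
        assert(aligned_to_chunk(128, chunk));
        assert(aligned_to_chunk(1280, chunk));
        assert(!aligned_to_chunk(130, chunk));
        return 0;
}
```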
@@ -2838,6 +3090,37 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry md_suspend_hi =
 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
 
+static ssize_t
+reshape_position_show(mddev_t *mddev, char *page)
+{
+       if (mddev->reshape_position != MaxSector)
+               return sprintf(page, "%llu\n",
+                              (unsigned long long)mddev->reshape_position);
+       strcpy(page, "none\n");
+       return 5;
+}
+
+static ssize_t
+reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       char *e;
+       unsigned long long new = simple_strtoull(buf, &e, 10);
+       if (mddev->pers)
+               return -EBUSY;
+       if (buf == e || (*e && *e != '\n'))
+               return -EINVAL;
+       mddev->reshape_position = new;
+       mddev->delta_disks = 0;
+       mddev->new_level = mddev->level;
+       mddev->new_layout = mddev->layout;
+       mddev->new_chunk = mddev->chunk_size;
+       return len;
+}
+
+static struct md_sysfs_entry md_reshape_position =
+__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
+       reshape_position_store);
+
 
 static struct attribute *md_default_attrs[] = {
        &md_level.attr,
@@ -2850,6 +3133,7 @@ static struct attribute *md_default_attrs[] = {
        &md_new_device.attr,
        &md_safe_delay.attr,
        &md_array_state.attr,
+       &md_reshape_position.attr,
        NULL,
 };
 
@@ -2860,9 +3144,11 @@ static struct attribute *md_redundancy_attrs[] = {
        &md_sync_max.attr,
        &md_sync_speed.attr,
        &md_sync_completed.attr,
+       &md_max_sync.attr,
        &md_suspend_lo.attr,
        &md_suspend_hi.attr,
        &md_bitmap.attr,
+       &md_degraded.attr,
        NULL,
 };
 static struct attribute_group md_redundancy_group = {
@@ -2934,6 +3220,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
        int partitioned = (MAJOR(dev) != MD_MAJOR);
        int shift = partitioned ? MdpMinorShift : 0;
        int unit = MINOR(dev) >> shift;
+       int error;
 
        if (!mddev)
                return NULL;
@@ -2962,11 +3249,13 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
        add_disk(disk);
        mddev->gendisk = disk;
        mutex_unlock(&disks_mutex);
-       mddev->kobj.parent = &disk->kobj;
-       mddev->kobj.k_name = NULL;
-       snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
-       mddev->kobj.ktype = &md_ktype;
-       kobject_register(&mddev->kobj);
+       error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
+                                    "%s", "md");
+       if (error)
+               printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
+                      disk->disk_name);
+       else
+               kobject_uevent(&mddev->kobj, KOBJ_ADD);
        return NULL;
 }
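md_probe() above switches from the removed kobject_register() to kobject_init_and_add() plus an explicit KOBJ_ADD uevent, matching the earlier change of kobject_unregister() to kobject_put() in mddev_put(). A hedged sketch of that general pattern as a tiny module, outside md (demo_obj, demo_ktype and the "demo" name are made up for illustration):

```c
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical object, only to show the init_and_add/put pattern. */
struct demo_obj {
        struct kobject kobj;
};

static void demo_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct demo_obj, kobj));
}

static struct kobj_type demo_ktype = {
        .release = demo_release,
};

static struct demo_obj *demo;

static int __init demo_init(void)
{
        int err;

        demo = kzalloc(sizeof(*demo), GFP_KERNEL);
        if (!demo)
                return -ENOMEM;

        /* Replaces the old kobject_register(): initialise and add in one
         * call, then announce the object to user space explicitly. */
        err = kobject_init_and_add(&demo->kobj, &demo_ktype, kernel_kobj, "demo");
        if (err) {
                kobject_put(&demo->kobj);       /* drops the ref, frees via release */
                return err;
        }
        kobject_uevent(&demo->kobj, KOBJ_ADD);
        return 0;
}

static void __exit demo_exit(void)
{
        kobject_put(&demo->kobj);               /* replaces kobject_unregister() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```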
 
@@ -3000,8 +3289,11 @@ static int do_md_run(mddev_t * mddev)
        /*
         * Analyze all RAID superblock(s)
         */
-       if (!mddev->raid_disks)
+       if (!mddev->raid_disks) {
+               if (!mddev->persistent)
+                       return -EINVAL;
                analyze_sbs(mddev);
+       }
 
        chunk_size = mddev->chunk_size;
 
@@ -3025,7 +3317,7 @@ static int do_md_run(mddev_t * mddev)
                }
 
                /* devices must have minimum size of one chunk */
-               ITERATE_RDEV(mddev,rdev,tmp) {
+               rdev_for_each(rdev, tmp, mddev) {
                        if (test_bit(Faulty, &rdev->flags))
                                continue;
                        if (rdev->size < chunk_size / 1024) {
@@ -3051,13 +3343,33 @@ static int do_md_run(mddev_t * mddev)
         * Drop all container device buffers, from now on
         * the only valid external interface is through the md
         * device.
-        * Also find largest hardsector size
         */
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       rdev_for_each(rdev, tmp, mddev) {
                if (test_bit(Faulty, &rdev->flags))
                        continue;
                sync_blockdev(rdev->bdev);
-               invalidate_bdev(rdev->bdev, 0);
+               invalidate_bdev(rdev->bdev);
+
+               /* perform some consistency tests on the device.
+                * We don't want the data to overlap the metadata,
+                * Internal Bitmap issues are handled elsewhere.
+                */
+               if (rdev->data_offset < rdev->sb_offset) {
+                       if (mddev->size &&
+                           rdev->data_offset + mddev->size*2
+                           > rdev->sb_offset*2) {
+                               printk("md: %s: data overlaps metadata\n",
+                                      mdname(mddev));
+                               return -EINVAL;
+                       }
+               } else {
+                       if (rdev->sb_offset*2 + rdev->sb_size/512
+                           > rdev->data_offset) {
+                               printk("md: %s: metadata overlaps data\n",
+                                      mdname(mddev));
+                               return -EINVAL;
+                       }
+               }
        }
 
        md_probe(mddev->unit, NULL, NULL);
@@ -3098,8 +3410,8 @@ static int do_md_run(mddev_t * mddev)
                mdk_rdev_t *rdev2;
                struct list_head *tmp2;
                int warned = 0;
-               ITERATE_RDEV(mddev, rdev, tmp) {
-                       ITERATE_RDEV(mddev, rdev2, tmp2) {
+               rdev_for_each(rdev, tmp, mddev) {
+                       rdev_for_each(rdev2, tmp2, mddev) {
                                if (rdev < rdev2 &&
                                    rdev->bdev->bd_contains ==
                                    rdev2->bdev->bd_contains) {
@@ -3144,9 +3456,12 @@ static int do_md_run(mddev_t * mddev)
                bitmap_destroy(mddev);
                return err;
        }
-       if (mddev->pers->sync_request)
-               sysfs_create_group(&mddev->kobj, &md_redundancy_group);
-       else if (mddev->ro == 2) /* auto-readonly not meaningful */
+       if (mddev->pers->sync_request) {
+               if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+                       printk(KERN_WARNING
+                              "md: cannot register extra attributes for %s\n",
+                              mdname(mddev));
+       } else if (mddev->ro == 2) /* auto-readonly not meaningful */
                mddev->ro = 0;
 
        atomic_set(&mddev->writes_pending,0);
@@ -3156,11 +3471,13 @@ static int do_md_run(mddev_t * mddev)
        mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
        mddev->in_sync = 1;
 
-       ITERATE_RDEV(mddev,rdev,tmp)
+       rdev_for_each(rdev, tmp, mddev)
                if (rdev->raid_disk >= 0) {
                        char nm[20];
                        sprintf(nm, "rd%d", rdev->raid_disk);
-                       sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+                       if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
+                               printk("md: cannot register %s for %s\n",
+                                      nm, mdname(mddev));
                }
        
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -3187,7 +3504,7 @@ static int do_md_run(mddev_t * mddev)
        if (mddev->degraded && !mddev->sync_thread) {
                struct list_head *rtmp;
                int spares = 0;
-               ITERATE_RDEV(mddev,rdev,rtmp)
+               rdev_for_each(rdev, rtmp, mddev)
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(In_sync, &rdev->flags) &&
                            !test_bit(Faulty, &rdev->flags))
@@ -3213,7 +3530,7 @@ static int do_md_run(mddev_t * mddev)
 
        mddev->changed = 1;
        md_new_event(mddev);
-       kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
+       kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
        return 0;
 }
 
@@ -3324,7 +3641,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
                        mddev->pers->stop(mddev);
                        mddev->queue->merge_bvec_fn = NULL;
                        mddev->queue->unplug_fn = NULL;
-                       mddev->queue->issue_flush_fn = NULL;
                        mddev->queue->backing_dev_info.congested_fn = NULL;
                        if (mddev->pers->sync_request)
                                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
@@ -3365,19 +3681,26 @@ static int do_md_stop(mddev_t * mddev, int mode)
                }
                mddev->bitmap_offset = 0;
 
-               ITERATE_RDEV(mddev,rdev,tmp)
+               rdev_for_each(rdev, tmp, mddev)
                        if (rdev->raid_disk >= 0) {
                                char nm[20];
                                sprintf(nm, "rd%d", rdev->raid_disk);
                                sysfs_remove_link(&mddev->kobj, nm);
                        }
 
+               /* make sure all md_delayed_delete calls have finished */
+               flush_scheduled_work();
+
                export_array(mddev);
 
                mddev->array_size = 0;
                mddev->size = 0;
                mddev->raid_disks = 0;
                mddev->recovery_cp = 0;
+               mddev->resync_max = MaxSector;
+               mddev->reshape_position = MaxSector;
+               mddev->external = 0;
+               mddev->persistent = 0;
 
        } else if (mddev->pers)
                printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -3400,7 +3723,7 @@ static void autorun_array(mddev_t *mddev)
 
        printk(KERN_INFO "md: running: ");
 
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       rdev_for_each(rdev, tmp, mddev) {
                char b[BDEVNAME_SIZE];
                printk("<%s>", bdevname(rdev->bdev,b));
        }
@@ -3443,7 +3766,7 @@ static void autorun_devices(int part)
                printk(KERN_INFO "md: considering %s ...\n",
                        bdevname(rdev0->bdev,b));
                INIT_LIST_HEAD(&candidates);
-               ITERATE_RDEV_PENDING(rdev,tmp)
+               rdev_for_each_list(rdev, tmp, pending_raid_disks)
                        if (super_90_load(rdev, rdev0, 0) >= 0) {
                                printk(KERN_INFO "md:  adding %s ...\n",
                                        bdevname(rdev->bdev,b));
@@ -3486,7 +3809,8 @@ static void autorun_devices(int part)
                        mddev_unlock(mddev);
                } else {
                        printk(KERN_INFO "md: created %s\n", mdname(mddev));
-                       ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
+                       mddev->persistent = 1;
+                       rdev_for_each_list(rdev, tmp, candidates) {
                                list_del_init(&rdev->same_set);
                                if (bind_rdev_to_array(rdev, mddev))
                                        export_rdev(rdev);
@@ -3497,7 +3821,7 @@ static void autorun_devices(int part)
                /* on success, candidates will be empty, on error
                 * it won't...
                 */
-               ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
+               rdev_for_each_list(rdev, tmp, candidates)
                        export_rdev(rdev);
                mddev_put(mddev);
        }
@@ -3527,7 +3851,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
        struct list_head *tmp;
 
        nr=working=active=failed=spare=0;
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       rdev_for_each(rdev, tmp, mddev) {
                nr++;
                if (test_bit(Faulty, &rdev->flags))
                        failed++;
@@ -3773,8 +4097,6 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                else
                        rdev->raid_disk = -1;
 
-               rdev->flags = 0;
-
                if (rdev->raid_disk < mddev->raid_disks)
                        if (info->state & (1<<MD_DISK_SYNC))
                                set_bit(In_sync, &rdev->flags);
@@ -3988,7 +4310,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
        if (info->raid_disks == 0) {
                /* just setting version number for superblock loading */
                if (info->major_version < 0 ||
-                   info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
+                   info->major_version >= ARRAY_SIZE(super_types) ||
                    super_types[info->major_version].name == NULL) {
                        /* maybe try to auto-load a module? */
                        printk(KERN_INFO 
@@ -4019,13 +4341,15 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
        else
                mddev->recovery_cp = 0;
        mddev->persistent    = ! info->not_persistent;
+       mddev->external      = 0;
 
        mddev->layout        = info->layout;
        mddev->chunk_size    = info->chunk_size;
 
        mddev->max_disks     = MD_SB_DISKS;
 
-       mddev->flags         = 0;
+       if (mddev->persistent)
+               mddev->flags         = 0;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
        mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
@@ -4067,7 +4391,7 @@ static int update_size(mddev_t *mddev, unsigned long size)
         */
        if (mddev->sync_thread)
                return -EBUSY;
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       rdev_for_each(rdev, tmp, mddev) {
                sector_t avail;
                avail = rdev->size * 2;
 
@@ -4325,9 +4649,10 @@ static int md_ioctl(struct inode *inode, struct file *file,
         */
        /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
         * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
-       if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
-                       && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
-                       && cmd != GET_BITMAP_FILE) {
+       if ((!mddev->raid_disks && !mddev->external)
+           && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
+           && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
+           && cmd != GET_BITMAP_FILE) {
                err = -ENODEV;
                goto abort_unlock;
        }
@@ -4510,7 +4835,6 @@ static int md_thread(void * arg)
         * many dirty RAID5 blocks.
         */
 
-       current->flags |= PF_NOFREEZE;
        allow_signal(SIGKILL);
        while (!kthread_should_stop()) {
 
@@ -4569,7 +4893,7 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
 
 void md_unregister_thread(mdk_thread_t *thread)
 {
-       dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
+       dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
 
        kthread_stop(thread->tsk);
        kfree(thread);
@@ -4612,7 +4936,7 @@ static void status_unused(struct seq_file *seq)
 
        seq_printf(seq, "unused devices: ");
 
-       ITERATE_RDEV_PENDING(rdev,tmp) {
+       rdev_for_each_list(rdev, tmp, pending_raid_disks) {
                char b[BDEVNAME_SIZE];
                i++;
                seq_printf(seq, "%s ",
@@ -4808,7 +5132,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
                }
 
                size = 0;
-               ITERATE_RDEV(mddev,rdev,tmp2) {
+               rdev_for_each(rdev, tmp2, mddev) {
                        char b[BDEVNAME_SIZE];
                        seq_printf(seq, " %s[%d]",
                                bdevname(rdev->bdev,b), rdev->desc_nr);
@@ -4837,7 +5161,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
                                           mddev->major_version,
                                           mddev->minor_version);
                        }
-               } else
+               } else if (mddev->external)
+                       seq_printf(seq, " super external:%s",
+                                  mddev->metadata_type);
+               else
                        seq_printf(seq, " super non-persistent");
 
                if (mddev->pers) {
@@ -4870,8 +5197,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
                                chunk_kb ? "KB" : "B");
                        if (bitmap->file) {
                                seq_printf(seq, ", file: ");
-                               seq_path(seq, bitmap->file->f_path.mnt,
-                                        bitmap->file->f_path.dentry," \t\n");
+                               seq_path(seq, &bitmap->file->f_path, " \t\n");
                        }
 
                        seq_printf(seq, "\n");
@@ -4910,15 +5236,6 @@ static int md_seq_open(struct inode *inode, struct file *file)
        return error;
 }
 
-static int md_seq_release(struct inode *inode, struct file *file)
-{
-       struct seq_file *m = file->private_data;
-       struct mdstat_info *mi = m->private;
-       m->private = NULL;
-       kfree(mi);
-       return seq_release(inode, file);
-}
-
 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
 {
        struct seq_file *m = filp->private_data;
@@ -4940,7 +5257,7 @@ static const struct file_operations md_seq_fops = {
        .open           = md_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = md_seq_release,
+       .release        = seq_release_private,
        .poll           = mdstat_poll,
 };
 
@@ -4967,28 +5284,37 @@ static int is_mddev_idle(mddev_t *mddev)
        mdk_rdev_t * rdev;
        struct list_head *tmp;
        int idle;
-       unsigned long curr_events;
+       long curr_events;
 
        idle = 1;
-       ITERATE_RDEV(mddev,rdev,tmp) {
+       rdev_for_each(rdev, tmp, mddev) {
                struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
                curr_events = disk_stat_read(disk, sectors[0]) + 
                                disk_stat_read(disk, sectors[1]) - 
                                atomic_read(&disk->sync_io);
-               /* The difference between curr_events and last_events
-                * will be affected by any new non-sync IO (making
-                * curr_events bigger) and any difference in the amount of
-                * in-flight syncio (making current_events bigger or smaller)
-                * The amount in-flight is currently limited to
-                * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
-                * which is at most 4096 sectors.
-                * These numbers are fairly fragile and should be made
-                * more robust, probably by enforcing the
-                * 'window size' that md_do_sync sort-of uses.
+               /* sync IO will cause sync_io to increase before the disk_stats
+                * as sync_io is counted when a request starts, and
+                * disk_stats is counted when it completes.
+                * So resync activity will cause curr_events to be smaller than
+                * when there was no such activity.
+                * non-sync IO will cause disk_stat to increase without
+                * increasing sync_io so curr_events will (eventually)
+                * be larger than it was before.  Once it becomes
+                * substantially larger, the test below will cause
+                * the array to appear non-idle, and resync will slow
+                * down.
+                * If there is a lot of outstanding resync activity when
+                * we set last_events to curr_events, then all that activity
+                * completing might cause the array to appear non-idle
+                * and resync will be slowed down even though there might
+                * not have been non-resync activity.  This will only
+                * happen once though.  'last_events' will soon reflect
+                * the state where there are few or no outstanding
+                * resync requests, and further resync activity will
+                * always make curr_events less than last_events.
                 *
-                * Note: the following is an unsigned comparison.
                 */
-               if ((curr_events - rdev->last_events + 4096) > 8192) {
+               if (curr_events - rdev->last_events > 4096) {
                        rdev->last_events = curr_events;
                        idle = 0;
                }
@@ -5138,7 +5464,7 @@ void md_do_sync(mddev_t *mddev)
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        goto skip;
                }
-               ITERATE_MDDEV(mddev2,tmp) {
+               for_each_mddev(mddev2, tmp) {
                        if (mddev2 == mddev)
                                continue;
                        if (mddev2->curr_resync && 
@@ -5188,7 +5514,7 @@ void md_do_sync(mddev_t *mddev)
                /* recovery follows the physical size of devices */
                max_sectors = mddev->size << 1;
                j = MaxSector;
-               ITERATE_RDEV(mddev,rdev,rtmp)
+               rdev_for_each(rdev, rtmp, mddev)
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(Faulty, &rdev->flags) &&
                            !test_bit(In_sync, &rdev->flags) &&
@@ -5236,8 +5562,16 @@ void md_do_sync(mddev_t *mddev)
                sector_t sectors;
 
                skipped = 0;
+               if (j >= mddev->resync_max) {
+                       sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+                       wait_event(mddev->recovery_wait,
+                                  mddev->resync_max > j
+                                  || kthread_should_stop());
+               }
+               if (kthread_should_stop())
+                       goto interrupted;
                sectors = mddev->pers->sync_request(mddev, j, &skipped,
-                                           currspeed < speed_min(mddev));
+                                                 currspeed < speed_min(mddev));
                if (sectors == 0) {
                        set_bit(MD_RECOVERY_ERR, &mddev->recovery);
                        goto out;
@@ -5279,15 +5613,9 @@ void md_do_sync(mddev_t *mddev)
                }
 
 
-               if (kthread_should_stop()) {
-                       /*
-                        * got a signal, exit.
-                        */
-                       printk(KERN_INFO 
-                               "md: md_do_sync() got signal ... exiting\n");
-                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                       goto out;
-               }
+               if (kthread_should_stop())
+                       goto interrupted;
+
 
                /*
                 * this loop exits only if either when we are slower than
@@ -5297,7 +5625,7 @@ void md_do_sync(mddev_t *mddev)
                 * about not overloading the IO subsystem. (things like an
                 * e2fsck being done on the RAID array should execute fast)
                 */
-               mddev->queue->unplug_fn(mddev->queue);
+               blk_unplug(mddev->queue);
                cond_resched();
 
                currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -5316,7 +5644,7 @@ void md_do_sync(mddev_t *mddev)
         * this also signals 'finished resyncing' to md_stop
         */
  out:
-       mddev->queue->unplug_fn(mddev->queue);
+       blk_unplug(mddev->queue);
 
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
@@ -5339,7 +5667,7 @@ void md_do_sync(mddev_t *mddev)
                } else {
                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                                mddev->curr_resync = MaxSector;
-                       ITERATE_RDEV(mddev,rdev,rtmp)
+                       rdev_for_each(rdev, rtmp, mddev)
                                if (rdev->raid_disk >= 0 &&
                                    !test_bit(Faulty, &rdev->flags) &&
                                    !test_bit(In_sync, &rdev->flags) &&
@@ -5351,9 +5679,22 @@ void md_do_sync(mddev_t *mddev)
 
  skip:
        mddev->curr_resync = 0;
+       mddev->resync_max = MaxSector;
+       sysfs_notify(&mddev->kobj, NULL, "sync_completed");
        wake_up(&resync_wait);
        set_bit(MD_RECOVERY_DONE, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
+       return;
+
+ interrupted:
+       /*
+        * got a signal, exit.
+        */
+       printk(KERN_INFO
+              "md: md_do_sync() got signal ... exiting\n");
+       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+       goto out;
+
 }
 EXPORT_SYMBOL_GPL(md_do_sync);
 
@@ -5364,8 +5705,9 @@ static int remove_and_add_spares(mddev_t *mddev)
        struct list_head *rtmp;
        int spares = 0;
 
-       ITERATE_RDEV(mddev,rdev,rtmp)
+       rdev_for_each(rdev, rtmp, mddev)
                if (rdev->raid_disk >= 0 &&
+                   !mddev->external &&
                    (test_bit(Faulty, &rdev->flags) ||
                     ! test_bit(In_sync, &rdev->flags)) &&
                    atomic_read(&rdev->nr_pending)==0) {
@@ -5379,15 +5721,19 @@ static int remove_and_add_spares(mddev_t *mddev)
                }
 
        if (mddev->degraded) {
-               ITERATE_RDEV(mddev,rdev,rtmp)
+               rdev_for_each(rdev, rtmp, mddev)
                        if (rdev->raid_disk < 0
                            && !test_bit(Faulty, &rdev->flags)) {
                                rdev->recovery_offset = 0;
                                if (mddev->pers->hot_add_disk(mddev,rdev)) {
                                        char nm[20];
                                        sprintf(nm, "rd%d", rdev->raid_disk);
-                                       sysfs_create_link(&mddev->kobj,
-                                                         &rdev->kobj, nm);
+                                       if (sysfs_create_link(&mddev->kobj,
+                                                             &rdev->kobj, nm))
+                                               printk(KERN_WARNING
+                                                      "md: cannot register "
+                                                      "%s for %s\n",
+                                                      nm, mdname(mddev));
                                        spares++;
                                        md_new_event(mddev);
                                } else
@@ -5440,7 +5786,7 @@ void md_check_recovery(mddev_t *mddev)
        }
 
        if ( ! (
-               mddev->flags ||
+               (mddev->flags && !mddev->external) ||
                test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
                test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
                (mddev->safemode == 1) ||
@@ -5456,7 +5802,8 @@ void md_check_recovery(mddev_t *mddev)
                if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
                    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
                        mddev->in_sync = 1;
-                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       if (mddev->persistent)
+                               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                }
                if (mddev->safemode == 1)
                        mddev->safemode = 0;
@@ -5488,7 +5835,7 @@ void md_check_recovery(mddev_t *mddev)
                         * information must be scrapped
                         */
                        if (!mddev->degraded)
-                               ITERATE_RDEV(mddev,rdev,rtmp)
+                               rdev_for_each(rdev, rtmp, mddev)
                                        rdev->saved_raid_disk = -1;
 
                        mddev->recovery = 0;
@@ -5565,7 +5912,7 @@ static int md_notify_reboot(struct notifier_block *this,
 
                printk(KERN_INFO "md: stopping all md devices.\n");
 
-               ITERATE_MDDEV(mddev,tmp)
+               for_each_mddev(mddev, tmp)
                        if (mddev_trylock(mddev)) {
                                do_md_stop (mddev, 1);
                                mddev_unlock(mddev);
@@ -5625,27 +5972,48 @@ static int __init md_init(void)
  * Searches all registered partitions for autorun RAID arrays
  * at boot time.
  */
-static dev_t detected_devices[128];
-static int dev_cnt;
+
+static LIST_HEAD(all_detected_devices);
+struct detected_devices_node {
+       struct list_head list;
+       dev_t dev;
+};
 
 void md_autodetect_dev(dev_t dev)
 {
-       if (dev_cnt >= 0 && dev_cnt < 127)
-               detected_devices[dev_cnt++] = dev;
+       struct detected_devices_node *node_detected_dev;
+
+       node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
+       if (node_detected_dev) {
+               node_detected_dev->dev = dev;
+               list_add_tail(&node_detected_dev->list, &all_detected_devices);
+       } else {
+               printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
+                       ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
+       }
 }
 
 
 static void autostart_arrays(int part)
 {
        mdk_rdev_t *rdev;
-       int i;
+       struct detected_devices_node *node_detected_dev;
+       dev_t dev;
+       int i_scanned, i_passed;
 
-       printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
+       i_scanned = 0;
+       i_passed = 0;
 
-       for (i = 0; i < dev_cnt; i++) {
-               dev_t dev = detected_devices[i];
+       printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
 
-               rdev = md_import_device(dev,0, 0);
+       while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
+               i_scanned++;
+               node_detected_dev = list_entry(all_detected_devices.next,
+                                       struct detected_devices_node, list);
+               list_del(&node_detected_dev->list);
+               dev = node_detected_dev->dev;
+               kfree(node_detected_dev);
+               rdev = md_import_device(dev,0, 90);
                if (IS_ERR(rdev))
                        continue;
 
@@ -5654,8 +6022,11 @@ static void autostart_arrays(int part)
                        continue;
                }
                list_add(&rdev->same_set, &pending_raid_disks);
+               i_passed++;
        }
-       dev_cnt = 0;
+
+       printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
+                                               i_scanned, i_passed);
 
        autorun_devices(part);
 }
@@ -5675,7 +6046,7 @@ static __exit void md_exit(void)
        unregister_reboot_notifier(&md_notifier);
        unregister_sysctl_table(raid_table_header);
        remove_proc_entry("mdstat", NULL);
-       ITERATE_MDDEV(mddev,tmp) {
+       for_each_mddev(mddev, tmp) {
                struct gendisk *disk = mddev->gendisk;
                if (!disk)
                        continue;
@@ -5687,7 +6058,7 @@ static __exit void md_exit(void)
        }
 }
 
-module_init(md_init)
+subsys_initcall(md_init);
 module_exit(md_exit)
 
 static int get_ro(char *buffer, struct kernel_param *kp)