drivers/md/md.c
index 6fe4a76..a307f87 100644
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
 #include <linux/kthread.h>
-#include <linux/linkage.h>
 #include <linux/raid/md.h>
 #include <linux/raid/bitmap.h>
 #include <linux/sysctl.h>
 #include <linux/buffer_head.h> /* for invalidate_bdev */
 #include <linux/poll.h>
-#include <linux/mutex.h>
 #include <linux/ctype.h>
-#include <linux/freezer.h>
-
-#include <linux/init.h>
-
+#include <linux/hdreg.h>
+#include <linux/proc_fs.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
 #include <linux/file.h>
-
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-
-#include <asm/unaligned.h>
+#include <linux/delay.h>
 
 #define MAJOR_NR MD_MAJOR
-#define MD_DRIVER
 
 /* 63 partitions with the alternate major number (mdp) */
 #define MdpMinorShift 6
@@ -66,7 +56,7 @@
 
 
 #ifndef MODULE
-static void autostart_arrays (int part);
+static void autostart_arrays(int part);
 #endif
 
 static LIST_HEAD(pers_list);
@@ -74,6 +64,8 @@ static DEFINE_SPINLOCK(pers_lock);
 
 static void md_print_devices(void);
 
+static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
+
 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
 
 /*
@@ -167,7 +159,6 @@ void md_new_event(mddev_t *mddev)
 {
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
-       sysfs_notify(&mddev->kobj, NULL, "sync_action");
 }
 EXPORT_SYMBOL_GPL(md_new_event);
 
@@ -211,7 +202,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
                )
 
 
-static int md_fail_request (struct request_queue *q, struct bio *bio)
+static int md_fail_request(struct request_queue *q, struct bio *bio)
 {
        bio_io_error(bio);
        return 0;
@@ -223,17 +214,28 @@ static inline mddev_t *mddev_get(mddev_t *mddev)
        return mddev;
 }
 
+static void mddev_delayed_delete(struct work_struct *ws);
+
 static void mddev_put(mddev_t *mddev)
 {
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
-       if (!mddev->raid_disks && list_empty(&mddev->disks)) {
+       if (!mddev->raid_disks && list_empty(&mddev->disks) &&
+           !mddev->hold_active) {
                list_del(&mddev->all_mddevs);
-               spin_unlock(&all_mddevs_lock);
-               blk_cleanup_queue(mddev->queue);
-               kobject_put(&mddev->kobj);
-       } else
-               spin_unlock(&all_mddevs_lock);
+               if (mddev->gendisk) {
+                       /* we did a probe so need to clean up.
+                        * Call schedule_work inside the spinlock
+                        * so that flush_scheduled_work() after
+                        * mddev_find will succeed in waiting for the
+                        * work to be done.
+                        */
+                       INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+                       schedule_work(&mddev->del_work);
+               } else
+                       kfree(mddev);
+       }
+       spin_unlock(&all_mddevs_lock);
 }
 
 static mddev_t * mddev_find(dev_t unit)
@@ -242,15 +244,50 @@ static mddev_t * mddev_find(dev_t unit)
 
  retry:
        spin_lock(&all_mddevs_lock);
-       list_for_each_entry(mddev, &all_mddevs, all_mddevs)
-               if (mddev->unit == unit) {
-                       mddev_get(mddev);
+
+       if (unit) {
+               list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+                       if (mddev->unit == unit) {
+                               mddev_get(mddev);
+                               spin_unlock(&all_mddevs_lock);
+                               kfree(new);
+                               return mddev;
+                       }
+
+               if (new) {
+                       list_add(&new->all_mddevs, &all_mddevs);
                        spin_unlock(&all_mddevs_lock);
-                       kfree(new);
-                       return mddev;
+                       new->hold_active = UNTIL_IOCTL;
+                       return new;
                }
-
-       if (new) {
+       } else if (new) {
+               /* find an unused unit number */
+               static int next_minor = 512;
+               int start = next_minor;
+               int is_free = 0;
+               int dev = 0;
+               while (!is_free) {
+                       dev = MKDEV(MD_MAJOR, next_minor);
+                       next_minor++;
+                       if (next_minor > MINORMASK)
+                               next_minor = 0;
+                       if (next_minor == start) {
+                               /* Oh dear, all in use. */
+                               spin_unlock(&all_mddevs_lock);
+                               kfree(new);
+                               return NULL;
+                       }
+                               
+                       is_free = 1;
+                       list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+                               if (mddev->unit == dev) {
+                                       is_free = 0;
+                                       break;
+                               }
+               }
+               new->unit = dev;
+               new->md_minor = MINOR(dev);
+               new->hold_active = UNTIL_STOP;
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                return new;
@@ -272,19 +309,14 @@ static mddev_t * mddev_find(dev_t unit)
        INIT_LIST_HEAD(&new->all_mddevs);
        init_timer(&new->safemode_timer);
        atomic_set(&new->active, 1);
+       atomic_set(&new->openers, 0);
        spin_lock_init(&new->write_lock);
        init_waitqueue_head(&new->sb_wait);
+       init_waitqueue_head(&new->recovery_wait);
        new->reshape_position = MaxSector;
+       new->resync_min = 0;
        new->resync_max = MaxSector;
-
-       new->queue = blk_alloc_queue(GFP_KERNEL);
-       if (!new->queue) {
-               kfree(new);
-               return NULL;
-       }
-       set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
-
-       blk_queue_make_request(new->queue, md_fail_request);
+       new->level = LEVEL_NONE;
 
        goto retry;
 }
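
A user-space sketch of the free-minor scan added above: start at minor 512, wrap at MINORMASK, and give up only after a full lap. The "used" minors here are illustrative assumptions, not anything from the patch.

    #include <stdio.h>

    #define MINORMASK 0xfffff                       /* 20-bit minor space, as in the kernel */

    static int minor_in_use(int minor)
    {
            return minor == 512 || minor == 513;    /* pretend these arrays already exist */
    }

    int main(void)
    {
            int next_minor = 512;
            int start = next_minor;
            int found = -1;

            while (found < 0) {
                    int candidate = next_minor++;
                    if (next_minor > MINORMASK)
                            next_minor = 0;
                    if (next_minor == start) {
                            puts("all minors in use");      /* the "Oh dear" case above */
                            return 1;
                    }
                    if (!minor_in_use(candidate))
                            found = candidate;
            }
            printf("allocated minor %d\n", found);          /* prints 514 here */
            return 0;
    }
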
@@ -308,25 +340,23 @@ static inline void mddev_unlock(mddev_t * mddev)
 
 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
 {
-       mdk_rdev_t * rdev;
-       struct list_head *tmp;
+       mdk_rdev_t *rdev;
 
-       rdev_for_each(rdev, tmp, mddev) {
+       list_for_each_entry(rdev, &mddev->disks, same_set)
                if (rdev->desc_nr == nr)
                        return rdev;
-       }
+
        return NULL;
 }
 
 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
 {
-       struct list_head *tmp;
        mdk_rdev_t *rdev;
 
-       rdev_for_each(rdev, tmp, mddev) {
+       list_for_each_entry(rdev, &mddev->disks, same_set)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;
-       }
+
        return NULL;
 }
 
@@ -342,21 +372,20 @@ static struct mdk_personality *find_pers(int level, char *clevel)
        return NULL;
 }
 
+/* return the offset of the super block in 512byte sectors */
 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
 {
-       sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
-       return MD_NEW_SIZE_BLOCKS(size);
+       sector_t num_sectors = bdev->bd_inode->i_size / 512;
+       return MD_NEW_SIZE_SECTORS(num_sectors);
 }
 
-static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
+static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
 {
-       sector_t size;
-
-       size = rdev->sb_offset;
+       sector_t num_sectors = rdev->sb_start;
 
        if (chunk_size)
-               size &= ~((sector_t)chunk_size/1024 - 1);
-       return size;
+               num_sectors &= ~((sector_t)chunk_size/512 - 1);
+       return num_sectors;
 }
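
calc_num_sectors() now masks the usable size down to a whole number of chunks measured in 512-byte sectors (the old code did the same rounding in 1K blocks). A small stand-alone check of that expression, assuming a 64 KiB chunk:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t num_sectors = 1000005;         /* illustrative sb_start value */
            unsigned chunk_size = 65536;            /* 64 KiB chunk -> 128 sectors */

            /* same masking as calc_num_sectors() above */
            num_sectors &= ~((uint64_t)chunk_size / 512 - 1);

            /* prints 999936, the largest multiple of 128 not above 1000005 */
            printf("%llu\n", (unsigned long long)num_sectors);
            return 0;
    }
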
 
 static int alloc_disk_sb(mdk_rdev_t * rdev)
@@ -367,7 +396,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev)
        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page) {
                printk(KERN_ALERT "md: out of memory.\n");
-               return -EINVAL;
+               return -ENOMEM;
        }
 
        return 0;
@@ -379,7 +408,7 @@ static void free_disk_sb(mdk_rdev_t * rdev)
                put_page(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
-               rdev->sb_offset = 0;
+               rdev->sb_start = 0;
                rdev->size = 0;
        }
 }
@@ -440,7 +469,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
         * causes ENOTSUPP, we allocate a spare bio...
         */
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
-       int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
+       int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
 
        bio->bi_bdev = rdev->bdev;
        bio->bi_sector = sector;
@@ -497,7 +526,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
        struct completion event;
        int ret;
 
-       rw |= (1 << BIO_RW_SYNC);
+       rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
 
        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
@@ -525,7 +554,7 @@ static int read_disk_sb(mdk_rdev_t * rdev, int size)
                return 0;
 
 
-       if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
+       if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;
@@ -538,17 +567,12 @@ fail:
 
 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
-       if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
-               (sb1->set_uuid1 == sb2->set_uuid1) &&
-               (sb1->set_uuid2 == sb2->set_uuid2) &&
-               (sb1->set_uuid3 == sb2->set_uuid3))
-
-               return 1;
-
-       return 0;
+       return  sb1->set_uuid0 == sb2->set_uuid0 &&
+               sb1->set_uuid1 == sb2->set_uuid1 &&
+               sb1->set_uuid2 == sb2->set_uuid2 &&
+               sb1->set_uuid3 == sb2->set_uuid3;
 }
 
-
 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
        int ret;
@@ -559,7 +583,7 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 
        if (!tmp1 || !tmp2) {
                ret = 0;
-               printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
+               printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
                goto abort;
        }
 
@@ -572,11 +596,7 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
        tmp1->nr_disks = 0;
        tmp2->nr_disks = 0;
 
-       if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
-               ret = 0;
-       else
-               ret = 1;
-
+       ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
 abort:
        kfree(tmp1);
        kfree(tmp2);
@@ -653,11 +673,14 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
  */
 
 struct super_type  {
-       char            *name;
-       struct module   *owner;
-       int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
-       int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
-       void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+       char                *name;
+       struct module       *owner;
+       int                 (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
+                                         int minor_version);
+       int                 (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+       void                (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+       unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
+                                               sector_t num_sectors);
 };
 
 /*
@@ -668,16 +691,14 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        mdp_super_t *sb;
        int ret;
-       sector_t sb_offset;
 
        /*
-        * Calculate the position of the superblock,
+        * Calculate the position of the superblock (512byte sectors),
         * it's at the end of the disk.
         *
         * It also happens to be a multiple of 4Kb.
         */
-       sb_offset = calc_dev_sboffset(rdev->bdev);
-       rdev->sb_offset = sb_offset;
+       rdev->sb_start = calc_dev_sboffset(rdev->bdev);
 
        ret = read_disk_sb(rdev, MD_SB_BYTES);
        if (ret) return ret;
@@ -754,7 +775,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
                else 
                        ret = 0;
        }
-       rdev->size = calc_dev_size(rdev, sb->chunk_size);
+       rdev->size = calc_num_sectors(rdev, sb->chunk_size) / 2;
 
        if (rdev->size < sb->size && sb->level > 1)
                /* "this cannot possibly happen" ... */
@@ -871,7 +892,6 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        mdp_super_t *sb;
-       struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int next_spare = mddev->raid_disks;
 
@@ -943,7 +963,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);
 
        sb->disks[0].state = (1<<MD_DISK_REMOVED);
-       rdev_for_each(rdev2, tmp, mddev) {
+       list_for_each_entry(rdev2, &mddev->disks, same_set) {
                mdp_disk_t *d;
                int desc_nr;
                if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
@@ -999,6 +1019,26 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 }
 
 /*
+ * rdev_size_change for 0.90.0
+ */
+static unsigned long long
+super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
+{
+       if (num_sectors && num_sectors < rdev->mddev->size * 2)
+               return 0; /* component must fit device */
+       if (rdev->mddev->bitmap_offset)
+               return 0; /* can't move bitmap */
+       rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+       if (!num_sectors || num_sectors > rdev->sb_start)
+               num_sectors = rdev->sb_start;
+       md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+                      rdev->sb_page);
+       md_super_wait(rdev->mddev);
+       return num_sectors / 2; /* kB for sysfs */
+}
+
+
+/*
  * version 1 superblock
  */
 
@@ -1029,12 +1069,12 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
 {
        struct mdp_superblock_1 *sb;
        int ret;
-       sector_t sb_offset;
+       sector_t sb_start;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        int bmask;
 
        /*
-        * Calculate the position of the superblock.
+        * Calculate the position of the superblock in 512byte sectors.
         * It is always aligned to a 4K boundary and
         * depending on minor_version, it can be:
         * 0: At least 8K, but less than 12K, from end of device
@@ -1043,22 +1083,20 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
         */
        switch(minor_version) {
        case 0:
-               sb_offset = rdev->bdev->bd_inode->i_size >> 9;
-               sb_offset -= 8*2;
-               sb_offset &= ~(sector_t)(4*2-1);
-               /* convert from sectors to K */
-               sb_offset /= 2;
+               sb_start = rdev->bdev->bd_inode->i_size >> 9;
+               sb_start -= 8*2;
+               sb_start &= ~(sector_t)(4*2-1);
                break;
        case 1:
-               sb_offset = 0;
+               sb_start = 0;
                break;
        case 2:
-               sb_offset = 4;
+               sb_start = 8;
                break;
        default:
                return -EINVAL;
        }
-       rdev->sb_offset = sb_offset;
+       rdev->sb_start = sb_start;
 
        /* superblock is rarely larger than 1K, but it can be larger,
         * and it is safe to read 4k, so we do that
@@ -1072,7 +1110,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
        if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
            sb->major_version != cpu_to_le32(1) ||
            le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
-           le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
+           le64_to_cpu(sb->super_offset) != rdev->sb_start ||
            (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
                return -EINVAL;
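
A worked example of the case-0 arithmetic in the switch above, now kept in 512-byte sectors (the device size used here is only an assumption for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t dev_sectors = 8000013;         /* assumed device size in sectors */
            uint64_t sb_start = dev_sectors;

            sb_start -= 8*2;                        /* step back 8 KiB (16 sectors) */
            sb_start &= ~(uint64_t)(4*2 - 1);       /* round down to a 4 KiB boundary */

            /* prints 7999992: 21 sectors (~10.5 KiB) from the end of the device,
             * i.e. at least 8K but less than 12K from the end, as the comment says */
            printf("%llu\n", (unsigned long long)sb_start);
            return 0;
    }
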
 
@@ -1108,7 +1146,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
                rdev->sb_size = (rdev->sb_size | bmask) + 1;
 
        if (minor_version
-           && rdev->data_offset < sb_offset + (rdev->sb_size/512))
+           && rdev->data_offset < sb_start + (rdev->sb_size/512))
                return -EINVAL;
 
        if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
@@ -1144,7 +1182,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
        if (minor_version)
                rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
        else
-               rdev->size = rdev->sb_offset;
+               rdev->size = rdev->sb_start / 2;
        if (rdev->size < le64_to_cpu(sb->data_size)/2)
                return -EINVAL;
        rdev->size = le64_to_cpu(sb->data_size)/2;
@@ -1251,7 +1289,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        struct mdp_superblock_1 *sb;
-       struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int max_dev, i;
        /* make rdev->sb match mddev and rdev data. */
@@ -1299,7 +1336,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        }
 
        max_dev = 0;
-       rdev_for_each(rdev2, tmp, mddev)
+       list_for_each_entry(rdev2, &mddev->disks, same_set)
                if (rdev2->desc_nr+1 > max_dev)
                        max_dev = rdev2->desc_nr+1;
 
@@ -1308,7 +1345,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);
        
-       rdev_for_each(rdev2, tmp, mddev) {
+       list_for_each_entry(rdev2, &mddev->disks, same_set) {
                i = rdev2->desc_nr;
                if (test_bit(Faulty, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -1323,35 +1360,74 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->sb_csum = calc_sb_1_csum(sb);
 }
 
+static unsigned long long
+super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
+{
+       struct mdp_superblock_1 *sb;
+       sector_t max_sectors;
+       if (num_sectors && num_sectors < rdev->mddev->size * 2)
+               return 0; /* component must fit device */
+       if (rdev->sb_start < rdev->data_offset) {
+               /* minor versions 1 and 2; superblock before data */
+               max_sectors = rdev->bdev->bd_inode->i_size >> 9;
+               max_sectors -= rdev->data_offset;
+               if (!num_sectors || num_sectors > max_sectors)
+                       num_sectors = max_sectors;
+       } else if (rdev->mddev->bitmap_offset) {
+               /* minor version 0 with bitmap we can't move */
+               return 0;
+       } else {
+               /* minor version 0; superblock after data */
+               sector_t sb_start;
+               sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
+               sb_start &= ~(sector_t)(4*2 - 1);
+               max_sectors = rdev->size * 2 + sb_start - rdev->sb_start;
+               if (!num_sectors || num_sectors > max_sectors)
+                       num_sectors = max_sectors;
+               rdev->sb_start = sb_start;
+       }
+       sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
+       sb->data_size = cpu_to_le64(num_sectors);
+       sb->super_offset = rdev->sb_start;
+       sb->sb_csum = calc_sb_1_csum(sb);
+       md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+                      rdev->sb_page);
+       md_super_wait(rdev->mddev);
+       return num_sectors / 2; /* kB for sysfs */
+}
 
 static struct super_type super_types[] = {
        [0] = {
                .name   = "0.90.0",
                .owner  = THIS_MODULE,
-               .load_super     = super_90_load,
-               .validate_super = super_90_validate,
-               .sync_super     = super_90_sync,
+               .load_super         = super_90_load,
+               .validate_super     = super_90_validate,
+               .sync_super         = super_90_sync,
+               .rdev_size_change   = super_90_rdev_size_change,
        },
        [1] = {
                .name   = "md-1",
                .owner  = THIS_MODULE,
-               .load_super     = super_1_load,
-               .validate_super = super_1_validate,
-               .sync_super     = super_1_sync,
+               .load_super         = super_1_load,
+               .validate_super     = super_1_validate,
+               .sync_super         = super_1_sync,
+               .rdev_size_change   = super_1_rdev_size_change,
        },
 };
 
 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
 {
-       struct list_head *tmp, *tmp2;
        mdk_rdev_t *rdev, *rdev2;
 
-       rdev_for_each(rdev, tmp, mddev1)
-               rdev_for_each(rdev2, tmp2, mddev2)
+       rcu_read_lock();
+       rdev_for_each_rcu(rdev, mddev1)
+               rdev_for_each_rcu(rdev2, mddev2)
                        if (rdev->bdev->bd_contains ==
-                           rdev2->bdev->bd_contains)
+                           rdev2->bdev->bd_contains) {
+                               rcu_read_unlock();
                                return 1;
-
+                       }
+       rcu_read_unlock();
        return 0;
 }
 
@@ -1368,6 +1444,11 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
                MD_BUG();
                return -EINVAL;
        }
+
+       /* prevent duplicates */
+       if (find_rdev(mddev, rdev->bdev->bd_dev))
+               return -EEXIST;
+
        /* make sure rdev->size exceeds mddev->size */
        if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
                if (mddev->pers) {
@@ -1395,6 +1476,11 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
                if (find_rdev_nr(mddev, rdev->desc_nr))
                        return -EBUSY;
        }
+       if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
+               printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
+                      mdname(mddev), mddev->max_disks);
+               return -EBUSY;
+       }
        bdevname(rdev->bdev,b);
        while ( (s=strchr(b, '/')) != NULL)
                *s = '!';
@@ -1405,16 +1491,18 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
        if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
                goto fail;
 
-       if (rdev->bdev->bd_part)
-               ko = &rdev->bdev->bd_part->dev.kobj;
-       else
-               ko = &rdev->bdev->bd_disk->dev.kobj;
+       ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
        if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
                kobject_del(&rdev->kobj);
                goto fail;
        }
-       list_add(&rdev->same_set, &mddev->disks);
+       rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");
+
+       list_add_rcu(&rdev->same_set, &mddev->disks);
        bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
+
+       /* May as well allow recovery to be retried once */
+       mddev->recovery_disabled = 0;
        return 0;
 
  fail:
@@ -1438,14 +1526,17 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
                return;
        }
        bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
-       list_del_init(&rdev->same_set);
+       list_del_rcu(&rdev->same_set);
        printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
        rdev->mddev = NULL;
        sysfs_remove_link(&rdev->kobj, "block");
-
+       sysfs_put(rdev->sysfs_state);
+       rdev->sysfs_state = NULL;
        /* We need to delay this, otherwise we can deadlock when
-        * writing to 'remove' to "dev/state"
+        * writing to 'remove' to "dev/state".  We also need
+        * to delay it due to rcu usage.
         */
+       synchronize_rcu();
        INIT_WORK(&rdev->del_work, md_delayed_delete);
        kobject_get(&rdev->kobj);
        schedule_work(&rdev->del_work);
@@ -1472,7 +1563,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
        if (err) {
                printk(KERN_ERR "md: could not bd_claim %s.\n",
                        bdevname(bdev, b));
-               blkdev_put(bdev);
+               blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
                return err;
        }
        if (!shared)
@@ -1488,7 +1579,7 @@ static void unlock_rdev(mdk_rdev_t *rdev)
        if (!bdev)
                MD_BUG();
        bd_release(bdev);
-       blkdev_put(bdev);
+       blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
 }
 
 void md_autodetect_dev(dev_t dev);
@@ -1501,7 +1592,6 @@ static void export_rdev(mdk_rdev_t * rdev)
        if (rdev->mddev)
                MD_BUG();
        free_disk_sb(rdev);
-       list_del_init(&rdev->same_set);
 #ifndef MODULE
        if (test_bit(AutoDetected, &rdev->flags))
                md_autodetect_dev(rdev->bdev->bd_dev);
@@ -1518,8 +1608,7 @@ static void kick_rdev_from_array(mdk_rdev_t * rdev)
 
 static void export_array(mddev_t *mddev)
 {
-       struct list_head *tmp;
-       mdk_rdev_t *rdev;
+       mdk_rdev_t *rdev, *tmp;
 
        rdev_for_each(rdev, tmp, mddev) {
                if (!rdev->mddev) {
@@ -1540,7 +1629,7 @@ static void print_desc(mdp_disk_t *desc)
                desc->major,desc->minor,desc->raid_disk,desc->state);
 }
 
-static void print_sb(mdp_super_t *sb)
+static void print_sb_90(mdp_super_t *sb)
 {
        int i;
 
@@ -1571,10 +1660,57 @@ static void print_sb(mdp_super_t *sb)
        }
        printk(KERN_INFO "md:     THIS: ");
        print_desc(&sb->this_disk);
-
 }
 
-static void print_rdev(mdk_rdev_t *rdev)
+static void print_sb_1(struct mdp_superblock_1 *sb)
+{
+       __u8 *uuid;
+
+       uuid = sb->set_uuid;
+       printk(KERN_INFO "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
+                       ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
+              KERN_INFO "md:    Name: \"%s\" CT:%llu\n",
+               le32_to_cpu(sb->major_version),
+               le32_to_cpu(sb->feature_map),
+               uuid[0], uuid[1], uuid[2], uuid[3],
+               uuid[4], uuid[5], uuid[6], uuid[7],
+               uuid[8], uuid[9], uuid[10], uuid[11],
+               uuid[12], uuid[13], uuid[14], uuid[15],
+               sb->set_name,
+               (unsigned long long)le64_to_cpu(sb->ctime)
+                      & MD_SUPERBLOCK_1_TIME_SEC_MASK);
+
+       uuid = sb->device_uuid;
+       printk(KERN_INFO "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
+                       " RO:%llu\n"
+              KERN_INFO "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+                       ":%02x%02x%02x%02x%02x%02x\n"
+              KERN_INFO "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
+              KERN_INFO "md:         (MaxDev:%u) \n",
+               le32_to_cpu(sb->level),
+               (unsigned long long)le64_to_cpu(sb->size),
+               le32_to_cpu(sb->raid_disks),
+               le32_to_cpu(sb->layout),
+               le32_to_cpu(sb->chunksize),
+               (unsigned long long)le64_to_cpu(sb->data_offset),
+               (unsigned long long)le64_to_cpu(sb->data_size),
+               (unsigned long long)le64_to_cpu(sb->super_offset),
+               (unsigned long long)le64_to_cpu(sb->recovery_offset),
+               le32_to_cpu(sb->dev_number),
+               uuid[0], uuid[1], uuid[2], uuid[3],
+               uuid[4], uuid[5], uuid[6], uuid[7],
+               uuid[8], uuid[9], uuid[10], uuid[11],
+               uuid[12], uuid[13], uuid[14], uuid[15],
+               sb->devflags,
+               (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
+               (unsigned long long)le64_to_cpu(sb->events),
+               (unsigned long long)le64_to_cpu(sb->resync_offset),
+               le32_to_cpu(sb->sb_csum),
+               le32_to_cpu(sb->max_dev)
+               );
+}
+
+static void print_rdev(mdk_rdev_t *rdev, int major_version)
 {
        char b[BDEVNAME_SIZE];
        printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
@@ -1582,15 +1718,22 @@ static void print_rdev(mdk_rdev_t *rdev)
                test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
                rdev->desc_nr);
        if (rdev->sb_loaded) {
-               printk(KERN_INFO "md: rdev superblock:\n");
-               print_sb((mdp_super_t*)page_address(rdev->sb_page));
+               printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
+               switch (major_version) {
+               case 0:
+                       print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
+                       break;
+               case 1:
+                       print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
+                       break;
+               }
        } else
                printk(KERN_INFO "md: no rdev superblock!\n");
 }
 
 static void md_print_devices(void)
 {
-       struct list_head *tmp, *tmp2;
+       struct list_head *tmp;
        mdk_rdev_t *rdev;
        mddev_t *mddev;
        char b[BDEVNAME_SIZE];
@@ -1605,12 +1748,12 @@ static void md_print_devices(void)
                        bitmap_print_sb(mddev->bitmap);
                else
                        printk("%s: ", mdname(mddev));
-               rdev_for_each(rdev, tmp2, mddev)
+               list_for_each_entry(rdev, &mddev->disks, same_set)
                        printk("<%s>", bdevname(rdev->bdev,b));
                printk("\n");
 
-               rdev_for_each(rdev, tmp2, mddev)
-                       print_rdev(rdev);
+               list_for_each_entry(rdev, &mddev->disks, same_set)
+                       print_rdev(rdev, mddev->major_version);
        }
        printk("md:     **********************************\n");
        printk("\n");
@@ -1626,9 +1769,8 @@ static void sync_sbs(mddev_t * mddev, int nospares)
         * with the rest of the array)
         */
        mdk_rdev_t *rdev;
-       struct list_head *tmp;
 
-       rdev_for_each(rdev, tmp, mddev) {
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
                if (rdev->sb_events == mddev->events ||
                    (nospares &&
                     rdev->raid_disk < 0 &&
@@ -1646,11 +1788,12 @@ static void sync_sbs(mddev_t * mddev, int nospares)
 
 static void md_update_sb(mddev_t * mddev, int force_change)
 {
-       struct list_head *tmp;
        mdk_rdev_t *rdev;
        int sync_req;
        int nospares = 0;
 
+       if (mddev->external)
+               return;
 repeat:
        spin_lock_irq(&mddev->write_lock);
 
@@ -1735,7 +1878,7 @@ repeat:
                mdname(mddev),mddev->in_sync);
 
        bitmap_update_sb(mddev->bitmap);
-       rdev_for_each(rdev, tmp, mddev) {
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
                char b[BDEVNAME_SIZE];
                dprintk(KERN_INFO "md: ");
                if (rdev->sb_loaded != 1)
@@ -1746,11 +1889,11 @@ repeat:
                dprintk("%s ", bdevname(rdev->bdev,b));
                if (!test_bit(Faulty, &rdev->flags)) {
                        md_super_write(mddev,rdev,
-                                      rdev->sb_offset<<1, rdev->sb_size,
+                                      rdev->sb_start, rdev->sb_size,
                                       rdev->sb_page);
                        dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
                                bdevname(rdev->bdev,b),
-                               (unsigned long long)rdev->sb_offset);
+                               (unsigned long long)rdev->sb_start);
                        rdev->sb_events = mddev->events;
 
                } else
@@ -1775,7 +1918,7 @@ repeat:
 
 }
 
-/* words written to sysfs files may, or my not, be \n terminated.
+/* words written to sysfs files may, or may not, be \n terminated.
  * We want to accept with case. For this we use cmd_match.
  */
 static int cmd_match(const char *cmd, const char *str)
@@ -1819,6 +1962,10 @@ state_show(mdk_rdev_t *rdev, char *page)
                len += sprintf(page+len, "%swrite_mostly",sep);
                sep = ",";
        }
+       if (test_bit(Blocked, &rdev->flags)) {
+               len += sprintf(page+len, "%sblocked", sep);
+               sep = ",";
+       }
        if (!test_bit(Faulty, &rdev->flags) &&
            !test_bit(In_sync, &rdev->flags)) {
                len += sprintf(page+len, "%sspare", sep);
@@ -1835,6 +1982,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
         *  remove  - disconnects the device
         *  writemostly - sets write_mostly
         *  -writemostly - clears write_mostly
+        *  blocked - sets the Blocked flag
+        *  -blocked - clears the Blocked flag
         */
        int err = -EINVAL;
        if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -1857,7 +2006,19 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
        } else if (cmd_match(buf, "-writemostly")) {
                clear_bit(WriteMostly, &rdev->flags);
                err = 0;
+       } else if (cmd_match(buf, "blocked")) {
+               set_bit(Blocked, &rdev->flags);
+               err = 0;
+       } else if (cmd_match(buf, "-blocked")) {
+               clear_bit(Blocked, &rdev->flags);
+               wake_up(&rdev->blocked_wait);
+               set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
+               md_wakeup_thread(rdev->mddev->thread);
+
+               err = 0;
        }
+       if (!err && rdev->sysfs_state)
+               sysfs_notify_dirent(rdev->sysfs_state);
        return err ? err : len;
 }
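
The new "blocked"/"-blocked" keywords are driven entirely through the per-device state file in sysfs (the rdev kobject is added as dev-<name> under the array's "md" directory, see the kobject_add() call elsewhere in this patch). A minimal user-space example of clearing the flag; the exact path is an assumption for an array md0 with component sda1:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/block/md0/md/dev-sda1/state";  /* illustrative */
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror(path);
                    return 1;
            }
            /* clear the Blocked flag so recovery can be retried */
            if (write(fd, "-blocked", strlen("-blocked")) < 0)
                    perror("write");
            close(fd);
            return 0;
    }
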
 static struct rdev_sysfs_entry rdev_state =
@@ -1903,7 +2064,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                slot = -1;
        else if (e==buf || (*e && *e!= '\n'))
                return -EINVAL;
-       if (rdev->mddev->pers) {
+       if (rdev->mddev->pers && slot == -1) {
                /* Setting 'slot' on an active array requires also
                 * updating the 'rd%d' link, and communicating
                 * with the personality with ->hot_*_disk.
@@ -1911,8 +2072,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                 * failed/spare devices.  This normally happens automatically,
                 * but not when the metadata is externally managed.
                 */
-               if (slot != -1)
-                       return -EBUSY;
                if (rdev->raid_disk == -1)
                        return -EEXIST;
                /* personality does all needed checks */
@@ -1926,6 +2085,42 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                sysfs_remove_link(&rdev->mddev->kobj, nm);
                set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
                md_wakeup_thread(rdev->mddev->thread);
+       } else if (rdev->mddev->pers) {
+               mdk_rdev_t *rdev2;
+               /* Activating a spare .. or possibly reactivating
+                * if we ever get bitmaps working here.
+                */
+
+               if (rdev->raid_disk != -1)
+                       return -EBUSY;
+
+               if (rdev->mddev->pers->hot_add_disk == NULL)
+                       return -EINVAL;
+
+               list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
+                       if (rdev2->raid_disk == slot)
+                               return -EEXIST;
+
+               rdev->raid_disk = slot;
+               if (test_bit(In_sync, &rdev->flags))
+                       rdev->saved_raid_disk = slot;
+               else
+                       rdev->saved_raid_disk = -1;
+               err = rdev->mddev->pers->
+                       hot_add_disk(rdev->mddev, rdev);
+               if (err) {
+                       rdev->raid_disk = -1;
+                       return err;
+               } else
+                       sysfs_notify_dirent(rdev->sysfs_state);
+               sprintf(nm, "rd%d", rdev->raid_disk);
+               if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
+                       printk(KERN_WARNING
+                              "md: cannot register "
+                              "%s for %s\n",
+                              nm, mdname(rdev->mddev));
+
+               /* don't wakeup anyone, leave that to userspace. */
        } else {
                if (slot >= rdev->mddev->raid_disks)
                        return -ENOSPC;
@@ -1934,6 +2129,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                clear_bit(Faulty, &rdev->flags);
                clear_bit(WriteMostly, &rdev->flags);
                set_bit(In_sync, &rdev->flags);
+               sysfs_notify_dirent(rdev->sysfs_state);
        }
        return len;
 }
@@ -1955,7 +2151,7 @@ offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
        unsigned long long offset = simple_strtoull(buf, &e, 10);
        if (e==buf || (*e && *e != '\n'))
                return -EINVAL;
-       if (rdev->mddev->pers)
+       if (rdev->mddev->pers && rdev->raid_disk >= 0)
                return -EBUSY;
        if (rdev->size && rdev->mddev->external)
                /* Must set offset before size, so overlap checks
@@ -1987,17 +2183,28 @@ static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
 static ssize_t
 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 {
-       char *e;
-       unsigned long long size = simple_strtoull(buf, &e, 10);
+       unsigned long long size;
        unsigned long long oldsize = rdev->size;
        mddev_t *my_mddev = rdev->mddev;
 
-       if (e==buf || (*e && *e != '\n'))
+       if (strict_strtoull(buf, 10, &size) < 0)
                return -EINVAL;
-       if (my_mddev->pers)
-               return -EBUSY;
+       if (my_mddev->pers && rdev->raid_disk >= 0) {
+               if (my_mddev->persistent) {
+                       size = super_types[my_mddev->major_version].
+                               rdev_size_change(rdev, size * 2);
+                       if (!size)
+                               return -EBUSY;
+               } else if (!size) {
+                       size = (rdev->bdev->bd_inode->i_size >> 10);
+                       size -= rdev->data_offset/2;
+               }
+       }
+       if (size < my_mddev->size)
+               return -EINVAL; /* component must fit device */
+
        rdev->size = size;
-       if (size > oldsize && rdev->mddev->external) {
+       if (size > oldsize && my_mddev->external) {
                /* need to check that all other rdevs with the same ->bdev
                 * do not overlap.  We need to unlock the mddev to avoid
                 * a deadlock.  We have already changed rdev->size, and if
@@ -2005,19 +2212,20 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                 */
                mddev_t *mddev;
                int overlap = 0;
-               struct list_head *tmp, *tmp2;
+               struct list_head *tmp;
 
                mddev_unlock(my_mddev);
                for_each_mddev(mddev, tmp) {
                        mdk_rdev_t *rdev2;
 
                        mddev_lock(mddev);
-                       rdev_for_each(rdev2, tmp2, mddev)
+                       list_for_each_entry(rdev2, &mddev->disks, same_set)
                                if (test_bit(AllReserved, &rdev2->flags) ||
                                    (rdev->bdev == rdev2->bdev &&
                                     rdev != rdev2 &&
-                                    overlaps(rdev->data_offset, rdev->size,
-                                           rdev2->data_offset, rdev2->size))) {
+                                    overlaps(rdev->data_offset, rdev->size * 2,
+                                             rdev2->data_offset,
+                                             rdev2->size * 2))) {
                                        overlap = 1;
                                        break;
                                }
@@ -2039,8 +2247,6 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                        return -EBUSY;
                }
        }
-       if (size < my_mddev->size || my_mddev->size == 0)
-               my_mddev->size = size;
        return len;
 }
 
@@ -2096,7 +2302,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
                        rv = -EBUSY;
                else
                        rv = entry->store(rdev, page, length);
-               mddev_unlock(rdev->mddev);
+               mddev_unlock(mddev);
        }
        return rv;
 }
@@ -2185,7 +2391,9 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
                        goto abort_free;
                }
        }
+
        INIT_LIST_HEAD(&rdev->same_set);
+       init_waitqueue_head(&rdev->blocked_wait);
 
        return rdev;
 
@@ -2207,8 +2415,7 @@ abort_free:
 static void analyze_sbs(mddev_t * mddev)
 {
        int i;
-       struct list_head *tmp;
-       mdk_rdev_t *rdev, *freshest;
+       mdk_rdev_t *rdev, *freshest, *tmp;
        char b[BDEVNAME_SIZE];
 
        freshest = NULL;
@@ -2234,6 +2441,15 @@ static void analyze_sbs(mddev_t * mddev)
 
        i = 0;
        rdev_for_each(rdev, tmp, mddev) {
+               if (rdev->desc_nr >= mddev->max_disks ||
+                   i > mddev->max_disks) {
+                       printk(KERN_WARNING
+                              "md: %s: %s: only %d devices permitted\n",
+                              mdname(mddev), bdevname(rdev->bdev, b),
+                              mddev->max_disks);
+                       kick_rdev_from_array(rdev);
+                       continue;
+               }
                if (rdev != freshest)
                        if (super_types[mddev->major_version].
                            validate_super(mddev, rdev)) {
@@ -2263,6 +2479,8 @@ static void analyze_sbs(mddev_t * mddev)
 
 }
 
+static void md_safemode_timeout(unsigned long data);
+
 static ssize_t
 safe_delay_show(mddev_t *mddev, char *page)
 {
@@ -2277,12 +2495,11 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
        int i;
        unsigned long msec;
        char buf[30];
-       char *e;
+
        /* remove a period, and count digits after it */
        if (len >= sizeof(buf))
                return -EINVAL;
-       strlcpy(buf, cbuf, len);
-       buf[len] = 0;
+       strlcpy(buf, cbuf, sizeof(buf));
        for (i=0; i<len; i++) {
                if (dot) {
                        if (isdigit(buf[i])) {
@@ -2295,16 +2512,18 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
                        buf[i] = 0;
                }
        }
-       msec = simple_strtoul(buf, &e, 10);
-       if (e == buf || (*e && *e != '\n'))
+       if (strict_strtoul(buf, 10, &msec) < 0)
                return -EINVAL;
        msec = (msec * 1000) / scale;
        if (msec == 0)
                mddev->safemode_delay = 0;
        else {
+               unsigned long old_delay = mddev->safemode_delay;
                mddev->safemode_delay = (msec*HZ)/1000;
                if (mddev->safemode_delay == 0)
                        mddev->safemode_delay = 1;
+               if (mddev->safemode_delay < old_delay)
+                       md_safemode_timeout((unsigned long)mddev);
        }
        return len;
 }
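
safe_mode_delay is written as seconds with an optional fractional part; the parser above strips the period, scales the value to milliseconds, converts to jiffies, and with the change above calls md_safemode_timeout() right away if the new delay is shorter than the old one. For example, writing "0.25" ends up as 250 ms; a sketch of the final conversion, with the HZ value assumed for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long hz = 1000;                /* assumed CONFIG_HZ for the example */
            unsigned long msec = 250;               /* "0.25" written to safe_mode_delay */
            unsigned long delay = (msec * hz) / 1000;

            if (delay == 0)
                    delay = 1;                      /* same clamp as the code above */

            printf("%lu jiffies\n", delay);         /* 250 jiffies at HZ=1000 */
            return 0;
    }
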
@@ -2456,7 +2675,6 @@ resync_start_show(mddev_t *mddev, char *page)
 static ssize_t
 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
 {
-       /* can only set chunk_size if array is not yet active */
        char *e;
        unsigned long long n = simple_strtoull(buf, &e, 10);
 
@@ -2483,7 +2701,7 @@ __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
  *     When written, doesn't tear down array, but just stops it
  * suspended (not supported yet)
  *     All IO requests will block. The array can be reconfigured.
- *     Writing this, if accepted, will block until array is quiessent
+ *     Writing this, if accepted, will block until array is quiescent
  * readonly
  *     no resync can happen.  no superblocks get written.
  *     write requests fail
@@ -2556,7 +2774,7 @@ array_state_show(mddev_t *mddev, char *page)
        return sprintf(page, "%s\n", array_states[st]);
 }
 
-static int do_md_stop(mddev_t * mddev, int ro);
+static int do_md_stop(mddev_t * mddev, int ro, int is_open);
 static int do_md_run(mddev_t * mddev);
 static int restart_array(mddev_t *mddev);
 
@@ -2570,16 +2788,16 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                break;
        case clear:
                /* stopping an active array */
-               if (atomic_read(&mddev->active) > 1)
+               if (atomic_read(&mddev->openers) > 0)
                        return -EBUSY;
-               err = do_md_stop(mddev, 0);
+               err = do_md_stop(mddev, 0, 0);
                break;
        case inactive:
                /* stopping an active array */
                if (mddev->pers) {
-                       if (atomic_read(&mddev->active) > 1)
+                       if (atomic_read(&mddev->openers) > 0)
                                return -EBUSY;
-                       err = do_md_stop(mddev, 2);
+                       err = do_md_stop(mddev, 2, 0);
                } else
                        err = 0; /* already inactive */
                break;
@@ -2587,18 +2805,23 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                break; /* not supported yet */
        case readonly:
                if (mddev->pers)
-                       err = do_md_stop(mddev, 1);
+                       err = do_md_stop(mddev, 1, 0);
                else {
                        mddev->ro = 1;
+                       set_disk_ro(mddev->gendisk, 1);
                        err = do_md_run(mddev);
                }
                break;
        case read_auto:
-               /* stopping an active array */
                if (mddev->pers) {
-                       err = do_md_stop(mddev, 1);
-                       if (err == 0)
-                               mddev->ro = 2; /* FIXME mark devices writable */
+                       if (mddev->ro == 0)
+                               err = do_md_stop(mddev, 1, 0);
+                       else if (mddev->ro == 1)
+                               err = restart_array(mddev);
+                       if (err == 0) {
+                               mddev->ro = 2;
+                               set_disk_ro(mddev->gendisk, 0);
+                       }
                } else {
                        mddev->ro = 2;
                        err = do_md_run(mddev);
@@ -2611,6 +2834,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                        if (atomic_read(&mddev->writes_pending) == 0) {
                                if (mddev->in_sync == 0) {
                                        mddev->in_sync = 1;
+                                       if (mddev->safemode == 1)
+                                               mddev->safemode = 0;
                                        if (mddev->persistent)
                                                set_bit(MD_CHANGE_CLEAN,
                                                        &mddev->flags);
@@ -2634,6 +2859,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                        err = 0;
                } else {
                        mddev->ro = 0;
+                       set_disk_ro(mddev->gendisk, 0);
                        err = do_md_run(mddev);
                }
                break;
@@ -2644,8 +2870,10 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
        }
        if (err)
                return err;
-       else
+       else {
+               sysfs_notify_dirent(mddev->sysfs_state);
                return len;
+       }
 }
 static struct md_sysfs_entry md_array_state =
 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
@@ -2748,7 +2976,7 @@ size_show(mddev_t *mddev, char *page)
        return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
 }
 
-static int update_size(mddev_t *mddev, unsigned long size);
+static int update_size(mddev_t *mddev, sector_t num_sectors);
 
 static ssize_t
 size_store(mddev_t *mddev, const char *buf, size_t len)
@@ -2765,7 +2993,7 @@ size_store(mddev_t *mddev, const char *buf, size_t len)
                return -EINVAL;
 
        if (mddev->pers) {
-               err = update_size(mddev, size);
+               err = update_size(mddev, size * 2);
                md_update_sb(mddev, 1);
        } else {
                if (mddev->size == 0 ||
@@ -2804,7 +3032,13 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
 {
        int major, minor;
        char *e;
-       if (!list_empty(&mddev->disks))
+       /* Changing the details of 'external' metadata is
+        * always permitted.  Otherwise there must be
+        * no devices attached to the array.
+        */
+       if (mddev->external && strncmp(buf, "external:", 9) == 0)
+               ;
+       else if (!list_empty(&mddev->disks))
                return -EBUSY;
 
        if (cmd_match(buf, "none")) {
@@ -2862,7 +3096,7 @@ action_show(mddev_t *mddev, char *page)
                                type = "check";
                        else
                                type = "repair";
-               } else
+               } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
                        type = "recover";
        }
        return sprintf(page, "%s\n", type);
@@ -2884,15 +3118,19 @@ action_store(mddev_t *mddev, const char *page, size_t len)
        } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
                   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return -EBUSY;
-       else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
+       else if (cmd_match(page, "resync"))
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-       else if (cmd_match(page, "reshape")) {
+       else if (cmd_match(page, "recover")) {
+               set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       } else if (cmd_match(page, "reshape")) {
                int err;
                if (mddev->pers->start_reshape == NULL)
                        return -EINVAL;
                err = mddev->pers->start_reshape(mddev);
                if (err)
                        return err;
+               sysfs_notify(&mddev->kobj, NULL, "degraded");
        } else {
                if (cmd_match(page, "check"))
                        set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -2903,6 +3141,7 @@ action_store(mddev_t *mddev, const char *page, size_t len)
        }
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
+       sysfs_notify_dirent(mddev->sysfs_action);
        return len;
 }
 
@@ -2979,14 +3218,44 @@ degraded_show(mddev_t *mddev, char *page)
 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
 
 static ssize_t
+sync_force_parallel_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%d\n", mddev->parallel_resync);
+}
+
+static ssize_t
+sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       long n;
+
+       if (strict_strtol(buf, 10, &n))
+               return -EINVAL;
+
+       if (n != 0 && n != 1)
+               return -EINVAL;
+
+       mddev->parallel_resync = n;
+
+       if (mddev->sync_thread)
+               wake_up(&resync_wait);
+
+       return len;
+}
+
+/* force parallel resync, even with shared block devices */
+static struct md_sysfs_entry md_sync_force_parallel =
+__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
+       sync_force_parallel_show, sync_force_parallel_store);
+
+static ssize_t
 sync_speed_show(mddev_t *mddev, char *page)
 {
        unsigned long resync, dt, db;
-       resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
-       dt = ((jiffies - mddev->resync_mark) / HZ);
+       resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
+       dt = (jiffies - mddev->resync_mark) / HZ;
        if (!dt) dt++;
-       db = resync - (mddev->resync_mark_cnt);
-       return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
+       db = resync - mddev->resync_mark_cnt;
+       return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
 }
 
 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
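
sync_speed reports (sectors completed since the last resync mark) divided by the elapsed seconds, halved to give KiB per second. A quick arithmetic check with assumed numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long db = 1048576;     /* sectors completed since resync_mark (illustrative) */
            unsigned long dt = 8;           /* seconds since resync_mark */

            /* sectors -> KiB/s, as in sync_speed_show() above */
            printf("%lu K/sec\n", db / dt / 2);     /* prints 65536, i.e. 64 MiB/s */
            return 0;
    }
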
@@ -3008,6 +3277,36 @@ sync_completed_show(mddev_t *mddev, char *page)
 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
 
 static ssize_t
+min_sync_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%llu\n",
+                      (unsigned long long)mddev->resync_min);
+}
+static ssize_t
+min_sync_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       unsigned long long min;
+       if (strict_strtoull(buf, 10, &min))
+               return -EINVAL;
+       if (min > mddev->resync_max)
+               return -EINVAL;
+       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+               return -EBUSY;
+
+       /* Must be a multiple of chunk_size */
+       if (mddev->chunk_size) {
+               if (min & (sector_t)((mddev->chunk_size>>9)-1))
+                       return -EINVAL;
+       }
+       mddev->resync_min = min;
+
+       return len;
+}
+
+static struct md_sysfs_entry md_min_sync =
+__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
+
+static ssize_t
 max_sync_show(mddev_t *mddev, char *page)
 {
        if (mddev->resync_max == MaxSector)
@@ -3022,9 +3321,10 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
        if (strncmp(buf, "max", 3) == 0)
                mddev->resync_max = MaxSector;
        else {
-               char *ep;
-               unsigned long long max = simple_strtoull(buf, &ep, 10);
-               if (ep == buf || (*ep != 0 && *ep != '\n'))
+               unsigned long long max;
+               if (strict_strtoull(buf, 10, &max))
+                       return -EINVAL;
+               if (max < mddev->resync_min)
                        return -EINVAL;
                if (max < mddev->resync_max &&
                    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -3153,7 +3453,9 @@ static struct attribute *md_redundancy_attrs[] = {
        &md_sync_min.attr,
        &md_sync_max.attr,
        &md_sync_speed.attr,
+       &md_sync_force_parallel.attr,
        &md_sync_completed.attr,
+       &md_min_sync.attr,
        &md_max_sync.attr,
        &md_suspend_lo.attr,
        &md_suspend_hi.attr,
@@ -3197,6 +3499,8 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        rv = mddev_lock(mddev);
+       if (mddev->hold_active == UNTIL_IOCTL)
+               mddev->hold_active = 0;
        if (!rv) {
                rv = entry->store(mddev, page, length);
                mddev_unlock(mddev);
@@ -3207,6 +3511,17 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
 static void md_free(struct kobject *ko)
 {
        mddev_t *mddev = container_of(ko, mddev_t, kobj);
+
+       if (mddev->sysfs_state)
+               sysfs_put(mddev->sysfs_state);
+
+       if (mddev->gendisk) {
+               del_gendisk(mddev->gendisk);
+               put_disk(mddev->gendisk);
+       }
+       if (mddev->queue)
+               blk_cleanup_queue(mddev->queue);
+
        kfree(mddev);
 }
 
@@ -3222,58 +3537,150 @@ static struct kobj_type md_ktype = {
 
 int mdp_major = 0;
 
-static struct kobject *md_probe(dev_t dev, int *part, void *data)
+static void mddev_delayed_delete(struct work_struct *ws)
+{
+       mddev_t *mddev = container_of(ws, mddev_t, del_work);
+
+       if (mddev->private == &md_redundancy_group) {
+               sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+               if (mddev->sysfs_action)
+                       sysfs_put(mddev->sysfs_action);
+               mddev->sysfs_action = NULL;
+               mddev->private = NULL;
+       }
+       kobject_del(&mddev->kobj);
+       kobject_put(&mddev->kobj);
+}
+
+static int md_alloc(dev_t dev, char *name)
 {
        static DEFINE_MUTEX(disks_mutex);
        mddev_t *mddev = mddev_find(dev);
        struct gendisk *disk;
-       int partitioned = (MAJOR(dev) != MD_MAJOR);
-       int shift = partitioned ? MdpMinorShift : 0;
-       int unit = MINOR(dev) >> shift;
+       int partitioned;
+       int shift;
+       int unit;
        int error;
 
        if (!mddev)
-               return NULL;
+               return -ENODEV;
+
+       partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
+       shift = partitioned ? MdpMinorShift : 0;
+       unit = MINOR(mddev->unit) >> shift;
+
+       /* wait for any previous instance of this device to be
+        * completely removed (mddev_delayed_delete).
+        */
+       flush_scheduled_work();
 
        mutex_lock(&disks_mutex);
        if (mddev->gendisk) {
                mutex_unlock(&disks_mutex);
                mddev_put(mddev);
-               return NULL;
+               return -EEXIST;
        }
+
+       if (name) {
+               /* Need to ensure that 'name' is not a duplicate.
+                */
+               mddev_t *mddev2;
+               spin_lock(&all_mddevs_lock);
+
+               list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
+                       if (mddev2->gendisk &&
+                           strcmp(mddev2->gendisk->disk_name, name) == 0) {
+                               spin_unlock(&all_mddevs_lock);
+                               return -EEXIST;
+                       }
+               spin_unlock(&all_mddevs_lock);
+       }
+
+       mddev->queue = blk_alloc_queue(GFP_KERNEL);
+       if (!mddev->queue) {
+               mutex_unlock(&disks_mutex);
+               mddev_put(mddev);
+               return -ENOMEM;
+       }
+       /* Can be unlocked because the queue is new: no concurrency */
+       queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
+
+       blk_queue_make_request(mddev->queue, md_fail_request);
+
        disk = alloc_disk(1 << shift);
        if (!disk) {
                mutex_unlock(&disks_mutex);
+               blk_cleanup_queue(mddev->queue);
+               mddev->queue = NULL;
                mddev_put(mddev);
-               return NULL;
+               return -ENOMEM;
        }
-       disk->major = MAJOR(dev);
+       disk->major = MAJOR(mddev->unit);
        disk->first_minor = unit << shift;
-       if (partitioned)
+       if (name)
+               strcpy(disk->disk_name, name);
+       else if (partitioned)
                sprintf(disk->disk_name, "md_d%d", unit);
        else
                sprintf(disk->disk_name, "md%d", unit);
        disk->fops = &md_fops;
        disk->private_data = mddev;
        disk->queue = mddev->queue;
+       /* Allow extended partitions.  This makes the
+        * 'mdp' device redundant, but we can't really
+        * remove it now.
+        */
+       disk->flags |= GENHD_FL_EXT_DEVT;
        add_disk(disk);
        mddev->gendisk = disk;
+       error = kobject_init_and_add(&mddev->kobj, &md_ktype,
+                                    &disk_to_dev(disk)->kobj, "%s", "md");
        mutex_unlock(&disks_mutex);
-       error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
-                                    "%s", "md");
        if (error)
                printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
                       disk->disk_name);
-       else
+       else {
                kobject_uevent(&mddev->kobj, KOBJ_ADD);
+               mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
+       }
+       mddev_put(mddev);
+       return 0;
+}
+
+static struct kobject *md_probe(dev_t dev, int *part, void *data)
+{
+       md_alloc(dev, NULL);
        return NULL;
 }
 
+static int add_named_array(const char *val, struct kernel_param *kp)
+{
+       /* val must be "md_*" where * is not all digits.
+        * We allocate an array with a large free minor number, and
+        * set the name to val.  val must not already be an active name.
+        */
+       int len = strlen(val);
+       char buf[DISK_NAME_LEN];
+
+       while (len && val[len-1] == '\n')
+               len--;
+       if (len >= DISK_NAME_LEN)
+               return -E2BIG;
+       strlcpy(buf, val, len+1);
+       if (strncmp(buf, "md_", 3) != 0)
+               return -EINVAL;
+       return md_alloc(0, buf);
+}
+
 static void md_safemode_timeout(unsigned long data)
 {
        mddev_t *mddev = (mddev_t *) data;
 
-       mddev->safemode = 1;
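+       /* Only enter safemode once there are no pending writes */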
+       if (!atomic_read(&mddev->writes_pending)) {
+               mddev->safemode = 1;
+               if (mddev->external)
+                       sysfs_notify_dirent(mddev->sysfs_state);
+       }
        md_wakeup_thread(mddev->thread);
 }
 
@@ -3283,7 +3690,6 @@ static int do_md_run(mddev_t * mddev)
 {
        int err;
        int chunk_size;
-       struct list_head *tmp;
        mdk_rdev_t *rdev;
        struct gendisk *disk;
        struct mdk_personality *pers;
@@ -3314,20 +3720,15 @@ static int do_md_run(mddev_t * mddev)
                        return -EINVAL;
                }
                /*
-                * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
+                * chunk-size has to be a power of 2
                 */
                if ( (1 << ffz(~chunk_size)) != chunk_size) {
                        printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
                        return -EINVAL;
                }
-               if (chunk_size < PAGE_SIZE) {
-                       printk(KERN_ERR "too small chunk_size: %d < %ld\n",
-                               chunk_size, PAGE_SIZE);
-                       return -EINVAL;
-               }
 
                /* devices must have minimum size of one chunk */
-               rdev_for_each(rdev, tmp, mddev) {
+               list_for_each_entry(rdev, &mddev->disks, same_set) {
                        if (test_bit(Faulty, &rdev->flags))
                                continue;
                        if (rdev->size < chunk_size / 1024) {
@@ -3342,19 +3743,17 @@ static int do_md_run(mddev_t * mddev)
                }
        }
 
-#ifdef CONFIG_KMOD
        if (mddev->level != LEVEL_NONE)
                request_module("md-level-%d", mddev->level);
        else if (mddev->clevel[0])
                request_module("md-%s", mddev->clevel);
-#endif
 
        /*
         * Drop all container device buffers, from now on
         * the only valid external interface is through the md
         * device.
         */
-       rdev_for_each(rdev, tmp, mddev) {
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
                if (test_bit(Faulty, &rdev->flags))
                        continue;
                sync_blockdev(rdev->bdev);
@@ -3364,22 +3763,23 @@ static int do_md_run(mddev_t * mddev)
                 * We don't want the data to overlap the metadata,
                 * Internal Bitmap issues are handled elsewhere.
                 */
-               if (rdev->data_offset < rdev->sb_offset) {
+               if (rdev->data_offset < rdev->sb_start) {
                        if (mddev->size &&
                            rdev->data_offset + mddev->size*2
-                           > rdev->sb_offset*2) {
+                           > rdev->sb_start) {
                                printk("md: %s: data overlaps metadata\n",
                                       mdname(mddev));
                                return -EINVAL;
                        }
                } else {
-                       if (rdev->sb_offset*2 + rdev->sb_size/512
+                       if (rdev->sb_start + rdev->sb_size/512
                            > rdev->data_offset) {
                                printk("md: %s: metadata overlaps data\n",
                                       mdname(mddev));
                                return -EINVAL;
                        }
                }
+               sysfs_notify_dirent(rdev->sysfs_state);
        }
 
        md_probe(mddev->unit, NULL, NULL);
@@ -3418,10 +3818,10 @@ static int do_md_run(mddev_t * mddev)
                 */
                char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
                mdk_rdev_t *rdev2;
-               struct list_head *tmp2;
                int warned = 0;
-               rdev_for_each(rdev, tmp, mddev) {
-                       rdev_for_each(rdev2, tmp2, mddev) {
+
+               list_for_each_entry(rdev, &mddev->disks, same_set)
+                       list_for_each_entry(rdev2, &mddev->disks, same_set) {
                                if (rdev < rdev2 &&
                                    rdev->bdev->bd_contains ==
                                    rdev2->bdev->bd_contains) {
@@ -3435,7 +3835,7 @@ static int do_md_run(mddev_t * mddev)
                                        warned = 1;
                                }
                        }
-               }
+
                if (warned)
                        printk(KERN_WARNING
                               "True protection against single-disk"
@@ -3451,7 +3851,9 @@ static int do_md_run(mddev_t * mddev)
                mddev->ro = 2; /* read-only, but switch on first write */
 
        err = mddev->pers->run(mddev);
-       if (!err && mddev->pers->sync_request) {
+       if (err)
+               printk(KERN_ERR "md: pers->run() failed ...\n");
+       else if (mddev->pers->sync_request) {
                err = bitmap_create(mddev);
                if (err) {
                        printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
@@ -3460,7 +3862,6 @@ static int do_md_run(mddev_t * mddev)
                }
        }
        if (err) {
-               printk(KERN_ERR "md: pers->run() failed ...\n");
                module_put(mddev->pers->owner);
                mddev->pers = NULL;
                bitmap_destroy(mddev);
@@ -3471,6 +3872,7 @@ static int do_md_run(mddev_t * mddev)
                        printk(KERN_WARNING
                               "md: cannot register extra attributes for %s\n",
                               mdname(mddev));
+               mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
        } else if (mddev->ro == 2) /* auto-readonly not meaningful */
                mddev->ro = 0;
 
@@ -3481,7 +3883,7 @@ static int do_md_run(mddev_t * mddev)
        mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
        mddev->in_sync = 1;
 
-       rdev_for_each(rdev, tmp, mddev)
+       list_for_each_entry(rdev, &mddev->disks, same_set)
                if (rdev->raid_disk >= 0) {
                        char nm[20];
                        sprintf(nm, "rd%d", rdev->raid_disk);
@@ -3495,7 +3897,7 @@ static int do_md_run(mddev_t * mddev)
        if (mddev->flags)
                md_update_sb(mddev, 0);
 
-       set_capacity(disk, mddev->array_size<<1);
+       set_capacity(disk, mddev->array_sectors);
 
        /* If we call blk_queue_make_request here, it will
         * re-initialise max_sectors etc which may have been
@@ -3512,9 +3914,8 @@ static int do_md_run(mddev_t * mddev)
         * it will remove the drives and not do the right thing
         */
        if (mddev->degraded && !mddev->sync_thread) {
-               struct list_head *rtmp;
                int spares = 0;
-               rdev_for_each(rdev, rtmp, mddev)
+               list_for_each_entry(rdev, &mddev->disks, same_set)
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(In_sync, &rdev->flags) &&
                            !test_bit(Faulty, &rdev->flags))
@@ -3540,45 +3941,36 @@ static int do_md_run(mddev_t * mddev)
 
        mddev->changed = 1;
        md_new_event(mddev);
-       kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
+       sysfs_notify_dirent(mddev->sysfs_state);
+       if (mddev->sysfs_action)
+               sysfs_notify_dirent(mddev->sysfs_action);
+       sysfs_notify(&mddev->kobj, NULL, "degraded");
+       kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
        return 0;
 }
 
 static int restart_array(mddev_t *mddev)
 {
        struct gendisk *disk = mddev->gendisk;
-       int err;
 
-       /*
-        * Complain if it has no devices
-        */
-       err = -ENXIO;
+       /* Complain if it has no devices */
        if (list_empty(&mddev->disks))
-               goto out;
-
-       if (mddev->pers) {
-               err = -EBUSY;
-               if (!mddev->ro)
-                       goto out;
-
-               mddev->safemode = 0;
-               mddev->ro = 0;
-               set_disk_ro(disk, 0);
-
-               printk(KERN_INFO "md: %s switched to read-write mode.\n",
-                       mdname(mddev));
-               /*
-                * Kick recovery or resync if necessary
-                */
-               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-               md_wakeup_thread(mddev->thread);
-               md_wakeup_thread(mddev->sync_thread);
-               err = 0;
-       } else
-               err = -EINVAL;
-
-out:
-       return err;
+               return -ENXIO;
+       if (!mddev->pers)
+               return -EINVAL;
+       if (!mddev->ro)
+               return -EBUSY;
+       mddev->safemode = 0;
+       mddev->ro = 0;
+       set_disk_ro(disk, 0);
+       printk(KERN_INFO "md: %s switched to read-write mode.\n",
+               mdname(mddev));
+       /* Kick recovery or resync if necessary */
+       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       md_wakeup_thread(mddev->thread);
+       md_wakeup_thread(mddev->sync_thread);
+       sysfs_notify_dirent(mddev->sysfs_state);
+       return 0;
 }
 
 /* similar to deny_write_access, but accounts for our holding a reference
@@ -3612,16 +4004,17 @@ static void restore_bitmap_write_access(struct file *file)
  *   1 - switch to readonly
  *   2 - stop but do not disassemble array
  */
-static int do_md_stop(mddev_t * mddev, int mode)
+static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 {
        int err = 0;
        struct gendisk *disk = mddev->gendisk;
 
+       if (atomic_read(&mddev->openers) > is_open) {
+               printk("md: %s still in use.\n", mdname(mddev));
+               return -EBUSY;
+       }
+
        if (mddev->pers) {
-               if (atomic_read(&mddev->active)>2) {
-                       printk("md: %s still in use.\n",mdname(mddev));
-                       return -EBUSY;
-               }
 
                if (mddev->sync_thread) {
                        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -3632,8 +4025,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
 
                del_timer_sync(&mddev->safemode_timer);
 
-               invalidate_partition(disk, 0);
-
                switch(mode) {
                case 1: /* readonly */
                        err  = -ENXIO;
@@ -3652,11 +4043,12 @@ static int do_md_stop(mddev_t * mddev, int mode)
                        mddev->queue->merge_bvec_fn = NULL;
                        mddev->queue->unplug_fn = NULL;
                        mddev->queue->backing_dev_info.congested_fn = NULL;
-                       if (mddev->pers->sync_request)
-                               sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
-
                        module_put(mddev->pers->owner);
+                       if (mddev->pers->sync_request)
+                               mddev->private = &md_redundancy_group;
                        mddev->pers = NULL;
+                       /* tell userspace to handle 'inactive' */
+                       sysfs_notify_dirent(mddev->sysfs_state);
 
                        set_capacity(disk, 0);
                        mddev->changed = 1;
@@ -3679,7 +4071,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
         */
        if (mode == 0) {
                mdk_rdev_t *rdev;
-               struct list_head *tmp;
 
                printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
 
@@ -3691,7 +4082,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
                }
                mddev->bitmap_offset = 0;
 
-               rdev_for_each(rdev, tmp, mddev)
+               list_for_each_entry(rdev, &mddev->disks, same_set)
                        if (rdev->raid_disk >= 0) {
                                char nm[20];
                                sprintf(nm, "rd%d", rdev->raid_disk);
@@ -3703,20 +4094,49 @@ static int do_md_stop(mddev_t * mddev, int mode)
 
                export_array(mddev);
 
-               mddev->array_size = 0;
+               mddev->array_sectors = 0;
                mddev->size = 0;
                mddev->raid_disks = 0;
                mddev->recovery_cp = 0;
+               mddev->resync_min = 0;
                mddev->resync_max = MaxSector;
                mddev->reshape_position = MaxSector;
                mddev->external = 0;
                mddev->persistent = 0;
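+               /* Reset remaining fields so the mddev can be reused */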
+               mddev->level = LEVEL_NONE;
+               mddev->clevel[0] = 0;
+               mddev->flags = 0;
+               mddev->ro = 0;
+               mddev->metadata_type[0] = 0;
+               mddev->chunk_size = 0;
+               mddev->ctime = mddev->utime = 0;
+               mddev->layout = 0;
+               mddev->max_disks = 0;
+               mddev->events = 0;
+               mddev->delta_disks = 0;
+               mddev->new_level = LEVEL_NONE;
+               mddev->new_layout = 0;
+               mddev->new_chunk = 0;
+               mddev->curr_resync = 0;
+               mddev->resync_mismatches = 0;
+               mddev->suspend_lo = mddev->suspend_hi = 0;
+               mddev->sync_speed_min = mddev->sync_speed_max = 0;
+               mddev->recovery = 0;
+               mddev->in_sync = 0;
+               mddev->changed = 0;
+               mddev->degraded = 0;
+               mddev->barriers_work = 0;
+               mddev->safemode = 0;
+               kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+               if (mddev->hold_active == UNTIL_STOP)
+                       mddev->hold_active = 0;
 
        } else if (mddev->pers)
                printk(KERN_INFO "md: %s switched to read-only mode.\n",
                        mdname(mddev));
        err = 0;
        md_new_event(mddev);
+       sysfs_notify_dirent(mddev->sysfs_state);
 out:
        return err;
 }
@@ -3725,7 +4145,6 @@ out:
 static void autorun_array(mddev_t *mddev)
 {
        mdk_rdev_t *rdev;
-       struct list_head *tmp;
        int err;
 
        if (list_empty(&mddev->disks))
@@ -3733,16 +4152,16 @@ static void autorun_array(mddev_t *mddev)
 
        printk(KERN_INFO "md: running: ");
 
-       rdev_for_each(rdev, tmp, mddev) {
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
                char b[BDEVNAME_SIZE];
                printk("<%s>", bdevname(rdev->bdev,b));
        }
        printk("\n");
 
-       err = do_md_run (mddev);
+       err = do_md_run(mddev);
        if (err) {
                printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
-               do_md_stop (mddev, 0);
+               do_md_stop(mddev, 0, 0);
        }
 }
 
@@ -3760,8 +4179,7 @@ static void autorun_array(mddev_t *mddev)
  */
 static void autorun_devices(int part)
 {
-       struct list_head *tmp;
-       mdk_rdev_t *rdev0, *rdev;
+       mdk_rdev_t *rdev0, *rdev, *tmp;
        mddev_t *mddev;
        char b[BDEVNAME_SIZE];
 
@@ -3776,7 +4194,7 @@ static void autorun_devices(int part)
                printk(KERN_INFO "md: considering %s ...\n",
                        bdevname(rdev0->bdev,b));
                INIT_LIST_HEAD(&candidates);
-               rdev_for_each_list(rdev, tmp, pending_raid_disks)
+               rdev_for_each_list(rdev, tmp, &pending_raid_disks)
                        if (super_90_load(rdev, rdev0, 0) >= 0) {
                                printk(KERN_INFO "md:  adding %s ...\n",
                                        bdevname(rdev->bdev,b));
@@ -3803,8 +4221,10 @@ static void autorun_devices(int part)
 
                md_probe(dev, NULL, NULL);
                mddev = mddev_find(dev);
-               if (!mddev) {
-                       printk(KERN_ERR 
+               if (!mddev || !mddev->gendisk) {
+                       if (mddev)
+                               mddev_put(mddev);
+                       printk(KERN_ERR
                                "md: cannot allocate memory for md drive.\n");
                        break;
                }
@@ -3820,7 +4240,7 @@ static void autorun_devices(int part)
                } else {
                        printk(KERN_INFO "md: created %s\n", mdname(mddev));
                        mddev->persistent = 1;
-                       rdev_for_each_list(rdev, tmp, candidates) {
+                       rdev_for_each_list(rdev, tmp, &candidates) {
                                list_del_init(&rdev->same_set);
                                if (bind_rdev_to_array(rdev, mddev))
                                        export_rdev(rdev);
@@ -3831,8 +4251,10 @@ static void autorun_devices(int part)
                /* on success, candidates will be empty, on error
                 * they won't...
                 */
-               rdev_for_each_list(rdev, tmp, candidates)
+               rdev_for_each_list(rdev, tmp, &candidates) {
+                       list_del_init(&rdev->same_set);
                        export_rdev(rdev);
+               }
                mddev_put(mddev);
        }
        printk(KERN_INFO "md: ... autorun DONE.\n");
@@ -3858,10 +4280,9 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
        mdu_array_info_t info;
        int nr,working,active,failed,spare;
        mdk_rdev_t *rdev;
-       struct list_head *tmp;
 
        nr=working=active=failed=spare=0;
-       rdev_for_each(rdev, tmp, mddev) {
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
                nr++;
                if (test_bit(Faulty, &rdev->flags))
                        failed++;
@@ -3913,9 +4334,11 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg)
        char *ptr, *buf = NULL;
        int err = -ENOMEM;
 
-       md_allow_write(mddev);
+       if (md_allow_write(mddev))
+               file = kmalloc(sizeof(*file), GFP_NOIO);
+       else
+               file = kmalloc(sizeof(*file), GFP_KERNEL);
 
-       file = kmalloc(sizeof(*file), GFP_KERNEL);
        if (!file)
                goto out;
 
@@ -3929,8 +4352,8 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg)
        if (!buf)
                goto out;
 
-       ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
-       if (!ptr)
+       ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
+       if (IS_ERR(ptr))
                goto out;
 
        strcpy(file->pathname, ptr);
@@ -3948,15 +4371,12 @@ out:
 static int get_disk_info(mddev_t * mddev, void __user * arg)
 {
        mdu_disk_info_t info;
-       unsigned int nr;
        mdk_rdev_t *rdev;
 
        if (copy_from_user(&info, arg, sizeof(info)))
                return -EFAULT;
 
-       nr = info.number;
-
-       rdev = find_rdev_nr(mddev, nr);
+       rdev = find_rdev_nr(mddev, info.number);
        if (rdev) {
                info.major = MAJOR(rdev->bdev->bd_dev);
                info.minor = MINOR(rdev->bdev->bd_dev);
@@ -4076,8 +4496,12 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                }
                if (err)
                        export_rdev(rdev);
+               else
+                       sysfs_notify_dirent(rdev->sysfs_state);
 
                md_update_sb(mddev, 1);
+               if (mddev->degraded)
+                       set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
                return err;
@@ -4094,7 +4518,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
 
        if (!(info->state & (1<<MD_DISK_FAULTY))) {
                int err;
-               rdev = md_import_device (dev, -1, 0);
+               rdev = md_import_device(dev, -1, 0);
                if (IS_ERR(rdev)) {
                        printk(KERN_WARNING 
                                "md: error, md_import_device() returned %ld\n",
@@ -4116,10 +4540,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
 
                if (!mddev->persistent) {
                        printk(KERN_INFO "md: nonpersistent superblock ...\n");
-                       rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+                       rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
                } else 
-                       rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
-               rdev->size = calc_dev_size(rdev, mddev->chunk_size);
+                       rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+               rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2;
 
                err = bind_rdev_to_array(rdev, mddev);
                if (err) {
@@ -4136,9 +4560,6 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev)
        char b[BDEVNAME_SIZE];
        mdk_rdev_t *rdev;
 
-       if (!mddev->pers)
-               return -ENODEV;
-
        rdev = find_rdev(mddev, dev);
        if (!rdev)
                return -ENXIO;
@@ -4161,7 +4582,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
 {
        char b[BDEVNAME_SIZE];
        int err;
-       unsigned int size;
        mdk_rdev_t *rdev;
 
        if (!mddev->pers)
@@ -4180,7 +4600,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
                return -EINVAL;
        }
 
-       rdev = md_import_device (dev, -1, 0);
+       rdev = md_import_device(dev, -1, 0);
        if (IS_ERR(rdev)) {
                printk(KERN_WARNING 
                        "md: error, md_import_device() returned %ld\n",
@@ -4189,13 +4609,11 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
        }
 
        if (mddev->persistent)
-               rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
+               rdev->sb_start = calc_dev_sboffset(rdev->bdev);
        else
-               rdev->sb_offset =
-                       rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+               rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
 
-       size = calc_dev_size(rdev, mddev->chunk_size);
-       rdev->size = size;
+       rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2;
 
        if (test_bit(Faulty, &rdev->flags)) {
                printk(KERN_WARNING 
@@ -4216,13 +4634,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
         * noticed in interrupt contexts ...
         */
 
-       if (rdev->desc_nr == mddev->max_disks) {
-               printk(KERN_WARNING "%s: can not hot-add to full array!\n",
-                       mdname(mddev));
-               err = -EBUSY;
-               goto abort_unbind_export;
-       }
-
        rdev->raid_disk = -1;
 
        md_update_sb(mddev, 1);
@@ -4236,9 +4647,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
        md_new_event(mddev);
        return 0;
 
-abort_unbind_export:
-       unbind_rdev_from_array(rdev);
-
 abort_export:
        export_rdev(rdev);
        return err;
@@ -4380,44 +4788,49 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
        return 0;
 }
 
-static int update_size(mddev_t *mddev, unsigned long size)
+static int update_size(mddev_t *mddev, sector_t num_sectors)
 {
-       mdk_rdev_t * rdev;
+       mdk_rdev_t *rdev;
        int rv;
-       struct list_head *tmp;
-       int fit = (size == 0);
+       int fit = (num_sectors == 0);
 
        if (mddev->pers->resize == NULL)
                return -EINVAL;
-       /* The "size" is the amount of each device that is used.
-        * This can only make sense for arrays with redundancy.
-        * linear and raid0 always use whatever space is available
-        * We can only consider changing the size if no resync
-        * or reconstruction is happening, and if the new size
-        * is acceptable. It must fit before the sb_offset or,
-        * if that is <data_offset, it must fit before the
-        * size of each device.
-        * If size is zero, we find the largest size that fits.
+       /* The "num_sectors" is the number of sectors of each device that
+        * is used.  This can only make sense for arrays with redundancy.
+        * linear and raid0 always use whatever space is available. We can only
+        * consider changing this number if no resync or reconstruction is
+        * happening, and if the new size is acceptable. It must fit before the
+        * sb_start or, if that is <data_offset, it must fit before the size
+        * of each device.  If num_sectors is zero, we find the largest size
+        * that fits.
         */
        if (mddev->sync_thread)
                return -EBUSY;
-       rdev_for_each(rdev, tmp, mddev) {
+       if (mddev->bitmap)
+               /* Sorry, cannot grow a bitmap yet, just remove it,
+                * grow, and re-add.
+                */
+               return -EBUSY;
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
                sector_t avail;
                avail = rdev->size * 2;
 
-               if (fit && (size == 0 || size > avail/2))
-                       size = avail/2;
-               if (avail < ((sector_t)size << 1))
+               if (fit && (num_sectors == 0 || num_sectors > avail))
+                       num_sectors = avail;
+               if (avail < num_sectors)
                        return -ENOSPC;
        }
-       rv = mddev->pers->resize(mddev, (sector_t)size *2);
+       rv = mddev->pers->resize(mddev, num_sectors);
        if (!rv) {
                struct block_device *bdev;
 
                bdev = bdget_disk(mddev->gendisk, 0);
                if (bdev) {
                        mutex_lock(&bdev->bd_inode->i_mutex);
-                       i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
+                       i_size_write(bdev->bd_inode,
+                                    (loff_t)mddev->array_sectors << 9);
                        mutex_unlock(&bdev->bd_inode->i_mutex);
                        bdput(bdev);
                }
@@ -4492,7 +4905,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
                        return mddev->pers->reconfig(mddev, info->layout, -1);
        }
        if (info->size >= 0 && mddev->size != info->size)
-               rv = update_size(mddev, info->size);
+               rv = update_size(mddev, (sector_t)info->size * 2);
 
        if (mddev->raid_disks    != info->raid_disks)
                rv = update_raid_disks(mddev, info->raid_disks);
@@ -4545,6 +4958,12 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
        return 0;
 }
 
+/*
+ * We have a problem here : there is no easy way to give a CHS
+ * virtual geometry. We currently pretend that we have a 2 heads
+ * 4 sectors (with a BIG number of cylinders...). This drives
+ * dosfs just mad... ;-)
+ */
 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
        mddev_t *mddev = bdev->bd_disk->private_data;
@@ -4555,7 +4974,7 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
        return 0;
 }
 
-static int md_ioctl(struct inode *inode, struct file *file,
+static int md_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
 {
        int err = 0;
@@ -4593,7 +5012,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
         * Commands creating/starting a new array:
         */
 
-       mddev = inode->i_bdev->bd_disk->private_data;
+       mddev = bdev->bd_disk->private_data;
 
        if (!mddev) {
                BUG();
@@ -4689,19 +5108,13 @@ static int md_ioctl(struct inode *inode, struct file *file,
                        goto done_unlock;
 
                case STOP_ARRAY:
-                       err = do_md_stop (mddev, 0);
+                       err = do_md_stop(mddev, 0, 1);
                        goto done_unlock;
 
                case STOP_ARRAY_RO:
-                       err = do_md_stop (mddev, 1);
+                       err = do_md_stop(mddev, 1, 1);
                        goto done_unlock;
 
-       /*
-        * We have a problem here : there is no easy way to give a CHS
-        * virtual geometry. We currently pretend that we have a 2 heads
-        * 4 sectors (with a BIG number of cylinders...). This drives
-        * dosfs just mad... ;-)
-        */
        }
 
        /*
@@ -4711,13 +5124,12 @@ static int md_ioctl(struct inode *inode, struct file *file,
         * here and hit the 'default' below, so only disallow
         * 'md' ioctls, and switch to rw mode if started auto-readonly.
         */
-       if (_IOC_TYPE(cmd) == MD_MAJOR &&
-           mddev->ro && mddev->pers) {
+       if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
                if (mddev->ro == 2) {
                        mddev->ro = 0;
-               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-               md_wakeup_thread(mddev->thread);
-
+                       sysfs_notify_dirent(mddev->sysfs_state);
+                       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       md_wakeup_thread(mddev->thread);
                } else {
                        err = -EROFS;
                        goto abort_unlock;
@@ -4749,7 +5161,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
                        goto done_unlock;
 
                case RUN_ARRAY:
-                       err = do_md_run (mddev);
+                       err = do_md_run(mddev);
                        goto done_unlock;
 
                case SET_BITMAP_FILE:
@@ -4763,6 +5175,9 @@ static int md_ioctl(struct inode *inode, struct file *file,
 
 done_unlock:
 abort_unlock:
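+       /* A recognised ioctl ends the UNTIL_IOCTL hold on this mddev */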
+       if (mddev->hold_active == UNTIL_IOCTL &&
+           err != -EINVAL)
+               mddev->hold_active = 0;
        mddev_unlock(mddev);
 
        return err;
@@ -4773,32 +5188,45 @@ abort:
        return err;
 }
 
-static int md_open(struct inode *inode, struct file *file)
+static int md_open(struct block_device *bdev, fmode_t mode)
 {
        /*
         * Succeed if we can lock the mddev, which confirms that
         * it isn't being stopped right now.
         */
-       mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
+       mddev_t *mddev = mddev_find(bdev->bd_dev);
        int err;
 
+       if (mddev->gendisk != bdev->bd_disk) {
+               /* we are racing with mddev_put which is discarding this
+                * bd_disk.
+                */
+               mddev_put(mddev);
+               /* Wait until bdev->bd_disk is definitely gone */
+               flush_scheduled_work();
+               /* Then retry the open from the top */
+               return -ERESTARTSYS;
+       }
+       BUG_ON(mddev != bdev->bd_disk->private_data);
+
        if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
                goto out;
 
        err = 0;
-       mddev_get(mddev);
+       atomic_inc(&mddev->openers);
        mddev_unlock(mddev);
 
-       check_disk_change(inode->i_bdev);
+       check_disk_change(bdev);
  out:
        return err;
 }
 
-static int md_release(struct inode *inode, struct file * file)
+static int md_release(struct gendisk *disk, fmode_t mode)
 {
-       mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
+       mddev_t *mddev = disk->private_data;
 
        BUG_ON(!mddev);
+       atomic_dec(&mddev->openers);
        mddev_put(mddev);
 
        return 0;
@@ -4823,7 +5251,7 @@ static struct block_device_operations md_fops =
        .owner          = THIS_MODULE,
        .open           = md_open,
        .release        = md_release,
-       .ioctl          = md_ioctl,
+       .locked_ioctl   = md_ioctl,
        .getgeo         = md_getgeo,
        .media_changed  = md_media_changed,
        .revalidate_disk= md_revalidate,
@@ -4918,6 +5346,9 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
 
        if (!rdev || test_bit(Faulty, &rdev->flags))
                return;
+
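+       /* Failures of externally managed arrays are handled by userspace,
+        * so mark the device Blocked until that has happened.
+        */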
+       if (mddev->external)
+               set_bit(Blocked, &rdev->flags);
 /*
        dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
                mdname(mddev),
@@ -4930,6 +5361,9 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
        if (!mddev->pers->error_handler)
                return;
        mddev->pers->error_handler(mddev,rdev);
+       if (mddev->degraded)
+               set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+       set_bit(StateChanged, &rdev->flags);
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
@@ -4942,11 +5376,10 @@ static void status_unused(struct seq_file *seq)
 {
        int i = 0;
        mdk_rdev_t *rdev;
-       struct list_head *tmp;
 
        seq_printf(seq, "unused devices: ");
 
-       rdev_for_each_list(rdev, tmp, pending_raid_disks) {
+       list_for_each_entry(rdev, &pending_raid_disks, same_set) {
                char b[BDEVNAME_SIZE];
                i++;
                seq_printf(seq, "%s ",
@@ -5105,7 +5538,6 @@ static int md_seq_show(struct seq_file *seq, void *v)
 {
        mddev_t *mddev = v;
        sector_t size;
-       struct list_head *tmp2;
        mdk_rdev_t *rdev;
        struct mdstat_info *mi = seq->private;
        struct bitmap *bitmap;
@@ -5142,7 +5574,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
                }
 
                size = 0;
-               rdev_for_each(rdev, tmp2, mddev) {
+               list_for_each_entry(rdev, &mddev->disks, same_set) {
                        char b[BDEVNAME_SIZE];
                        seq_printf(seq, " %s[%d]",
                                bdevname(rdev->bdev,b), rdev->desc_nr);
@@ -5159,10 +5591,11 @@ static int md_seq_show(struct seq_file *seq, void *v)
                if (!list_empty(&mddev->disks)) {
                        if (mddev->pers)
                                seq_printf(seq, "\n      %llu blocks",
-                                       (unsigned long long)mddev->array_size);
+                                          (unsigned long long)
+                                          mddev->array_sectors / 2);
                        else
                                seq_printf(seq, "\n      %llu blocks",
-                                       (unsigned long long)size);
+                                          (unsigned long long)size);
                }
                if (mddev->persistent) {
                        if (mddev->major_version != 0 ||
@@ -5178,11 +5611,11 @@ static int md_seq_show(struct seq_file *seq, void *v)
                        seq_printf(seq, " super non-persistent");
 
                if (mddev->pers) {
-                       mddev->pers->status (seq, mddev);
+                       mddev->pers->status(seq, mddev);
                        seq_printf(seq, "\n      ");
                        if (mddev->pers->sync_request) {
                                if (mddev->curr_resync > 2) {
-                                       status_resync (seq, mddev);
+                                       status_resync(seq, mddev);
                                        seq_printf(seq, "\n      ");
                                } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
                                        seq_printf(seq, "\tresync=DELAYED\n      ");
@@ -5292,15 +5725,15 @@ int unregister_md_personality(struct mdk_personality *p)
 static int is_mddev_idle(mddev_t *mddev)
 {
        mdk_rdev_t * rdev;
-       struct list_head *tmp;
        int idle;
        long curr_events;
 
        idle = 1;
-       rdev_for_each(rdev, tmp, mddev) {
+       rcu_read_lock();
+       rdev_for_each_rcu(rdev, mddev) {
                struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
-               curr_events = disk_stat_read(disk, sectors[0]) + 
-                               disk_stat_read(disk, sectors[1]) - 
+               curr_events = part_stat_read(&disk->part0, sectors[0]) +
+                               part_stat_read(&disk->part0, sectors[1]) -
                                atomic_read(&disk->sync_io);
                /* sync IO will cause sync_io to increase before the disk_stats
                 * as sync_io is counted when a request starts, and
@@ -5329,6 +5762,7 @@ static int is_mddev_idle(mddev_t *mddev)
                        idle = 0;
                }
        }
+       rcu_read_unlock();
        return idle;
 }
 
@@ -5338,7 +5772,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
        atomic_sub(blocks, &mddev->recovery_active);
        wake_up(&mddev->recovery_wait);
        if (!ok) {
-               set_bit(MD_RECOVERY_ERR, &mddev->recovery);
+               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
                // stop recovery, signal do_sync ....
        }
@@ -5352,6 +5786,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
  */
 void md_write_start(mddev_t *mddev, struct bio *bi)
 {
+       int did_change = 0;
        if (bio_data_dir(bi) != WRITE)
                return;
 
@@ -5362,18 +5797,26 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
                md_wakeup_thread(mddev->sync_thread);
+               did_change = 1;
        }
        atomic_inc(&mddev->writes_pending);
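+       /* A new write cancels any pending switch to safemode */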
+       if (mddev->safemode == 1)
+               mddev->safemode = 0;
        if (mddev->in_sync) {
                spin_lock_irq(&mddev->write_lock);
                if (mddev->in_sync) {
                        mddev->in_sync = 0;
                        set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                        md_wakeup_thread(mddev->thread);
+                       did_change = 1;
                }
                spin_unlock_irq(&mddev->write_lock);
        }
-       wait_event(mddev->sb_wait, mddev->flags==0);
+       if (did_change)
+               sysfs_notify_dirent(mddev->sysfs_state);
+       wait_event(mddev->sb_wait,
+                  !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
+                  !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 }
 
 void md_write_end(mddev_t *mddev)
@@ -5391,13 +5834,18 @@ void md_write_end(mddev_t *mddev)
  * may proceed without blocking.  It is important to call this before
  * attempting a GFP_KERNEL allocation while holding the mddev lock.
  * Must be called with mddev_lock held.
+ *
+ * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
+ * is dropped, so return -EAGAIN after notifying userspace.
  */
-void md_allow_write(mddev_t *mddev)
+int md_allow_write(mddev_t *mddev)
 {
        if (!mddev->pers)
-               return;
+               return 0;
        if (mddev->ro)
-               return;
+               return 0;
+       if (!mddev->pers->sync_request)
+               return 0;
 
        spin_lock_irq(&mddev->write_lock);
        if (mddev->in_sync) {
@@ -5408,13 +5856,17 @@ void md_allow_write(mddev_t *mddev)
                        mddev->safemode = 1;
                spin_unlock_irq(&mddev->write_lock);
                md_update_sb(mddev, 0);
+               sysfs_notify_dirent(mddev->sysfs_state);
        } else
                spin_unlock_irq(&mddev->write_lock);
+
+       if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+               return -EAGAIN;
+       else
+               return 0;
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
-static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
-
 #define SYNC_MARKS     10
 #define        SYNC_MARK_STEP  (3*HZ)
 void md_do_sync(mddev_t *mddev)
@@ -5429,7 +5881,6 @@ void md_do_sync(mddev_t *mddev)
        struct list_head *tmp;
        sector_t last_check;
        int skipped = 0;
-       struct list_head *rtmp;
        mdk_rdev_t *rdev;
        char *desc;
 
@@ -5478,8 +5929,9 @@ void md_do_sync(mddev_t *mddev)
                for_each_mddev(mddev2, tmp) {
                        if (mddev2 == mddev)
                                continue;
-                       if (mddev2->curr_resync && 
-                           match_mddev_units(mddev,mddev2)) {
+                       if (!mddev->parallel_resync
+                       &&  mddev2->curr_resync
+                       &&  match_mddev_units(mddev, mddev2)) {
                                DEFINE_WAIT(wq);
                                if (mddev < mddev2 && mddev->curr_resync == 2) {
                                        /* arbitrarily yield */
@@ -5491,7 +5943,11 @@ void md_do_sync(mddev_t *mddev)
                                         * time 'round when curr_resync == 2
                                         */
                                        continue;
-                               prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+                               /* We need to wait 'interruptible' so as not to
+                                * contribute to the load average, and not to
+                                * be caught by 'softlockup'
+                                */
+                               prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
                                if (!kthread_should_stop() &&
                                    mddev2->curr_resync >= mddev->curr_resync) {
                                        printk(KERN_INFO "md: delaying %s of %s"
@@ -5499,6 +5955,8 @@ void md_do_sync(mddev_t *mddev)
                                               " share one or more physical units)\n",
                                               desc, mdname(mddev), mdname(mddev2));
                                        mddev_put(mddev2);
+                                       if (signal_pending(current))
+                                               flush_signals(current);
                                        schedule();
                                        finish_wait(&resync_wait, &wq);
                                        goto try_again;
@@ -5516,16 +5974,18 @@ void md_do_sync(mddev_t *mddev)
                max_sectors = mddev->resync_max_sectors;
                mddev->resync_mismatches = 0;
                /* we don't use the checkpoint if there's a bitmap */
-               if (!mddev->bitmap &&
-                   !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+               if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+                       j = mddev->resync_min;
+               else if (!mddev->bitmap)
                        j = mddev->recovery_cp;
+
        } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                max_sectors = mddev->size << 1;
        else {
                /* recovery follows the physical size of devices */
                max_sectors = mddev->size << 1;
                j = MaxSector;
-               rdev_for_each(rdev, rtmp, mddev)
+               list_for_each_entry(rdev, &mddev->disks, same_set)
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(Faulty, &rdev->flags) &&
                            !test_bit(In_sync, &rdev->flags) &&
@@ -5559,7 +6019,6 @@ void md_do_sync(mddev_t *mddev)
                window/2,(unsigned long long) max_sectors/2);
 
        atomic_set(&mddev->recovery_active, 0);
-       init_waitqueue_head(&mddev->recovery_wait);
        last_check = 0;
 
        if (j>2) {
@@ -5584,7 +6043,7 @@ void md_do_sync(mddev_t *mddev)
                sectors = mddev->pers->sync_request(mddev, j, &skipped,
                                                  currspeed < speed_min(mddev));
                if (sectors == 0) {
-                       set_bit(MD_RECOVERY_ERR, &mddev->recovery);
+                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        goto out;
                }
 
@@ -5607,8 +6066,7 @@ void md_do_sync(mddev_t *mddev)
 
                last_check = io_sectors;
 
-               if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
-                   test_bit(MD_RECOVERY_ERR, &mddev->recovery))
+               if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        break;
 
        repeat:
@@ -5662,8 +6120,7 @@ void md_do_sync(mddev_t *mddev)
        /* tell personality that we are finished */
        mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
 
-       if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
-           !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
+       if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
            mddev->curr_resync > 2) {
                if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                        if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -5678,7 +6135,7 @@ void md_do_sync(mddev_t *mddev)
                } else {
                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                                mddev->curr_resync = MaxSector;
-                       rdev_for_each(rdev, rtmp, mddev)
+                       list_for_each_entry(rdev, &mddev->disks, same_set)
                                if (rdev->raid_disk >= 0 &&
                                    !test_bit(Faulty, &rdev->flags) &&
                                    !test_bit(In_sync, &rdev->flags) &&
@@ -5690,6 +6147,7 @@ void md_do_sync(mddev_t *mddev)
 
  skip:
        mddev->curr_resync = 0;
+       mddev->resync_min = 0;
        mddev->resync_max = MaxSector;
        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
        wake_up(&resync_wait);
@@ -5713,12 +6171,11 @@ EXPORT_SYMBOL_GPL(md_do_sync);
 static int remove_and_add_spares(mddev_t *mddev)
 {
        mdk_rdev_t *rdev;
-       struct list_head *rtmp;
        int spares = 0;
 
-       rdev_for_each(rdev, rtmp, mddev)
+       list_for_each_entry(rdev, &mddev->disks, same_set)
                if (rdev->raid_disk >= 0 &&
-                   !mddev->external &&
+                   !test_bit(Blocked, &rdev->flags) &&
                    (test_bit(Faulty, &rdev->flags) ||
                     ! test_bit(In_sync, &rdev->flags)) &&
                    atomic_read(&rdev->nr_pending)==0) {
@@ -5731,12 +6188,17 @@ static int remove_and_add_spares(mddev_t *mddev)
                        }
                }
 
-       if (mddev->degraded) {
-               rdev_for_each(rdev, rtmp, mddev)
+       if (mddev->degraded && !mddev->ro && !mddev->recovery_disabled) {
+               list_for_each_entry(rdev, &mddev->disks, same_set) {
+                       if (rdev->raid_disk >= 0 &&
+                           !test_bit(In_sync, &rdev->flags) &&
+                           !test_bit(Blocked, &rdev->flags))
+                               spares++;
                        if (rdev->raid_disk < 0
                            && !test_bit(Faulty, &rdev->flags)) {
                                rdev->recovery_offset = 0;
-                               if (mddev->pers->hot_add_disk(mddev,rdev)) {
+                               if (mddev->pers->
+                                   hot_add_disk(mddev, rdev) == 0) {
                                        char nm[20];
                                        sprintf(nm, "rd%d", rdev->raid_disk);
                                        if (sysfs_create_link(&mddev->kobj,
@@ -5750,6 +6212,7 @@ static int remove_and_add_spares(mddev_t *mddev)
                                } else
                                        break;
                        }
+               }
        }
        return spares;
 }
@@ -5763,7 +6226,7 @@ static int remove_and_add_spares(mddev_t *mddev)
  * to do that as needed.
  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
  * "->recovery" and create a thread at ->sync_thread.
- * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
+ * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread which will reap the thread and finish up.
  * This thread also removes any faulty devices (with nr_pending == 0).
  *
@@ -5778,7 +6241,6 @@ static int remove_and_add_spares(mddev_t *mddev)
 void md_check_recovery(mddev_t *mddev)
 {
        mdk_rdev_t *rdev;
-       struct list_head *rtmp;
 
 
        if (mddev->bitmap)
@@ -5788,7 +6250,7 @@ void md_check_recovery(mddev_t *mddev)
                return;
 
        if (signal_pending(current)) {
-               if (mddev->pers->sync_request) {
+               if (mddev->pers->sync_request && !mddev->external) {
                        printk(KERN_INFO "md: %s in immediate safe mode\n",
                               mdname(mddev));
                        mddev->safemode = 2;
@@ -5796,11 +6258,13 @@ void md_check_recovery(mddev_t *mddev)
                flush_signals(current);
        }
 
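+       /* Read-only arrays need attention only if recovery was requested */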
+       if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+               return;
        if ( ! (
                (mddev->flags && !mddev->external) ||
                test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
                test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
-               (mddev->safemode == 1) ||
+               (mddev->external == 0 && mddev->safemode == 1) ||
                (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
                 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
                ))
@@ -5809,20 +6273,41 @@ void md_check_recovery(mddev_t *mddev)
        if (mddev_trylock(mddev)) {
                int spares = 0;
 
-               spin_lock_irq(&mddev->write_lock);
-               if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
-                   !mddev->in_sync && mddev->recovery_cp == MaxSector) {
-                       mddev->in_sync = 1;
-                       if (mddev->persistent)
-                               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               if (mddev->ro) {
+                       /* Only thing we do on a ro array is remove
+                        * failed devices.
+                        */
+                       remove_and_add_spares(mddev);
+                       clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       goto unlock;
+               }
+
+               if (!mddev->external) {
+                       int did_change = 0;
+                       spin_lock_irq(&mddev->write_lock);
+                       if (mddev->safemode &&
+                           !atomic_read(&mddev->writes_pending) &&
+                           !mddev->in_sync &&
+                           mddev->recovery_cp == MaxSector) {
+                               mddev->in_sync = 1;
+                               did_change = 1;
+                               if (mddev->persistent)
+                                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       }
+                       if (mddev->safemode == 1)
+                               mddev->safemode = 0;
+                       spin_unlock_irq(&mddev->write_lock);
+                       if (did_change)
+                               sysfs_notify_dirent(mddev->sysfs_state);
                }
-               if (mddev->safemode == 1)
-                       mddev->safemode = 0;
-               spin_unlock_irq(&mddev->write_lock);
 
                if (mddev->flags)
                        md_update_sb(mddev, 0);
 
+               list_for_each_entry(rdev, &mddev->disks, same_set)
+                       if (test_and_clear_bit(StateChanged, &rdev->flags))
+                               sysfs_notify_dirent(rdev->sysfs_state);
+
 
                if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
                    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
@@ -5834,11 +6319,13 @@ void md_check_recovery(mddev_t *mddev)
                        /* resync has finished, collect result */
                        md_unregister_thread(mddev->sync_thread);
                        mddev->sync_thread = NULL;
-                       if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
-                           !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+                       if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+                           !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                                /* success...*/
                                /* activate any spares */
-                               mddev->pers->spare_active(mddev);
+                               if (mddev->pers->spare_active(mddev))
+                                       sysfs_notify(&mddev->kobj, NULL,
+                                                    "degraded");
                        }
                        md_update_sb(mddev, 1);
 
@@ -5846,20 +6333,24 @@ void md_check_recovery(mddev_t *mddev)
                         * information must be scrapped
                         */
                        if (!mddev->degraded)
-                               rdev_for_each(rdev, rtmp, mddev)
+                               list_for_each_entry(rdev, &mddev->disks, same_set)
                                        rdev->saved_raid_disk = -1;
 
                        mddev->recovery = 0;
                        /* flag recovery needed just to double check */
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       sysfs_notify_dirent(mddev->sysfs_action);
                        md_new_event(mddev);
                        goto unlock;
                }
+               /* Set RUNNING before clearing NEEDED to avoid
+                * any transients in the value of "sync_action".
+                */
+               set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+               clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                /* Clear some bits that don't mean anything, but
                 * might be left set
                 */
-               clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-               clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
                clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
                clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
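
Setting MD_RECOVERY_RUNNING before clearing MD_RECOVERY_NEEDED closes a visibility window rather than a functional race: readers of the sync_action attribute derive its value from the recovery bits and report "idle" when neither bit is set, so clearing NEEDED first could let a concurrent reader briefly see a running operation as idle. A simplified reader-side check (not a verbatim copy of action_show(); kernel context assumed):

static int sync_action_looks_idle(mddev_t *mddev)
{
	/* If NEEDED were cleared before RUNNING is set, both tests could
	 * fail at the same time and a reader would transiently see "idle".
	 */
	return !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
	       !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
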
 
@@ -5877,17 +6368,20 @@ void md_check_recovery(mddev_t *mddev)
                                /* Cannot proceed */
                                goto unlock;
                        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+                       clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if ((spares = remove_and_add_spares(mddev))) {
                        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+                       clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+                       set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if (mddev->recovery_cp < MaxSector) {
                        set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+                       clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
                        /* nothing to be done ... */
                        goto unlock;
 
                if (mddev->pers->sync_request) {
-                       set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                        if (spares && mddev->bitmap && ! mddev->bitmap->file) {
                                /* We are adding a device or devices to an array
                                 * which has the bitmap stored on all devices.
@@ -5906,13 +6400,31 @@ void md_check_recovery(mddev_t *mddev)
                                mddev->recovery = 0;
                        } else
                                md_wakeup_thread(mddev->sync_thread);
+                       sysfs_notify_dirent(mddev->sysfs_action);
                        md_new_event(mddev);
                }
        unlock:
+               if (!mddev->sync_thread) {
+                       clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+                       if (test_and_clear_bit(MD_RECOVERY_RECOVER,
+                                              &mddev->recovery))
+                               if (mddev->sysfs_action)
+                                       sysfs_notify_dirent(mddev->sysfs_action);
+               }
                mddev_unlock(mddev);
        }
 }
 
+void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
+{
+       sysfs_notify_dirent(rdev->sysfs_state);
+       wait_event_timeout(rdev->blocked_wait,
+                          !test_bit(Blocked, &rdev->flags),
+                          msecs_to_jiffies(5000));
+       rdev_dec_pending(rdev, mddev);
+}
+EXPORT_SYMBOL(md_wait_for_blocked_rdev);
+
 static int md_notify_reboot(struct notifier_block *this,
                            unsigned long code, void *x)
 {
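
md_wait_for_blocked_rdev(), added above, gives personalities a way to pause when a write error hits a device that has been marked Blocked: it notifies the rdev's sysfs state, waits up to five seconds for the Blocked flag to clear, and then drops the pending reference the caller took. A hypothetical caller, sketched only to show the expected reference handling (kernel context assumed; the surrounding retry policy is not part of this patch):

static void pause_if_blocked(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (test_bit(Blocked, &rdev->flags)) {
		/* The helper ends with rdev_dec_pending(), so take the
		 * reference it is going to drop.
		 */
		atomic_inc(&rdev->nr_pending);
		md_wait_for_blocked_rdev(rdev, mddev);
	}
}
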
@@ -5925,7 +6437,11 @@ static int md_notify_reboot(struct notifier_block *this,
 
                for_each_mddev(mddev, tmp)
                        if (mddev_trylock(mddev)) {
-                               do_md_stop (mddev, 1);
+                               /* Force a switch to readonly even if the
+                                * array appears to still be in use.  Hence
+                                * the '100'.
+                                */
+                               do_md_stop(mddev, 1, 100);
                                mddev_unlock(mddev);
                        }
                /*
@@ -5969,7 +6485,7 @@ static int __init md_init(void)
        raid_table_header = register_sysctl_table(raid_root_table);
 
        md_geninit();
-       return (0);
+       return 0;
 }
 
 
@@ -6055,14 +6571,8 @@ static __exit void md_exit(void)
        unregister_sysctl_table(raid_table_header);
        remove_proc_entry("mdstat", NULL);
        for_each_mddev(mddev, tmp) {
-               struct gendisk *disk = mddev->gendisk;
-               if (!disk)
-                       continue;
                export_array(mddev);
-               del_gendisk(disk);
-               put_disk(disk);
-               mddev->gendisk = NULL;
-               mddev_put(mddev);
+               mddev->hold_active = 0;
        }
 }
 
@@ -6087,6 +6597,7 @@ static int set_ro(const char *val, struct kernel_param *kp)
 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
 
+module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
 
 EXPORT_SYMBOL(register_md_personality);
 EXPORT_SYMBOL(unregister_md_personality);
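
The new new_array parameter is write-only (S_IWUSR, so root only) and routed to add_named_array(), which is outside this section; the handler's name suggests that writing an array name pre-creates the corresponding md device ahead of any ioctl, and that reading is taken as an assumption here. A minimal userspace sketch, also assuming the parameter appears at /sys/module/md_mod/parameters/new_array and that a name such as "md_home" is acceptable (neither is confirmed by this hunk):

#include <stdio.h>

int main(void)
{
	/* Assumed path for the md_mod "new_array" module parameter. */
	FILE *f = fopen("/sys/module/md_mod/parameters/new_array", "w");

	if (!f) {
		perror("new_array");
		return 1;
	}
	/* Assumed array name; md is expected to create the device. */
	fputs("md_home", f);
	return fclose(f) ? 1 : 0;	/* the write is flushed on close */
}
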