diff --git a/drivers/md/md.c b/drivers/md/md.c
index 40cb79a..65ddc88 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
 */
 
 #include <linux/module.h>
+#include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/linkage.h>
 #include <linux/raid/md.h>
 #include <linux/raid/bitmap.h>
 #include <linux/sysctl.h>
 #include <linux/buffer_head.h> /* for invalidate_bdev */
-#include <linux/suspend.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
 #include <linux/ctype.h>
+#include <linux/freezer.h>
 
 #include <linux/init.h>
 
@@ -273,6 +274,7 @@ static mddev_t * mddev_find(dev_t unit)
        atomic_set(&new->active, 1);
        spin_lock_init(&new->write_lock);
        init_waitqueue_head(&new->sb_wait);
+       new->reshape_position = MaxSector;
 
        new->queue = blk_alloc_queue(GFP_KERNEL);
        if (!new->queue) {
@@ -389,8 +391,12 @@ static int super_written(struct bio *bio, unsigned int bytes_done, int error)
        if (bio->bi_size)
                return 1;
 
-       if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
+       if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+               printk("md: super_written gets error=%d, uptodate=%d\n",
+                      error, test_bit(BIO_UPTODATE, &bio->bi_flags));
+               WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
                md_error(mddev, rdev);
+       }
 
        if (atomic_dec_and_test(&mddev->pending_writes))
                wake_up(&mddev->sb_wait);
@@ -585,14 +591,41 @@ abort:
        return ret;
 }
 
+
+static u32 md_csum_fold(u32 csum)
+{
+       csum = (csum & 0xffff) + (csum >> 16);
+       return (csum & 0xffff) + (csum >> 16);
+}
+
 static unsigned int calc_sb_csum(mdp_super_t * sb)
 {
+       u64 newcsum = 0;
+       u32 *sb32 = (u32*)sb;
+       int i;
        unsigned int disk_csum, csum;
 
        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
-       csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
+
+       for (i = 0; i < MD_SB_BYTES/4 ; i++)
+               newcsum += sb32[i];
+       csum = (newcsum & 0xffffffff) + (newcsum>>32);
+
+
+#ifdef CONFIG_ALPHA
+       /* This used to use csum_partial, which was wrong for several
+        * reasons including that different results are returned on
+        * different architectures.  It isn't critical that we get exactly
+        * the same return value as before (we always csum_fold before
+        * testing, and that removes any differences).  However as we
+        * know that csum_partial always returned a 16bit value on
+        * alphas, do a fold to maximise conformity to previous behaviour.
+        */
+       sb->sb_csum = md_csum_fold(disk_csum);
+#else
        sb->sb_csum = disk_csum;
+#endif
        return csum;
 }
 
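Aside (not part of the patch): a minimal userspace sketch of the end-around-carry fold that md_csum_fold() performs above; fold16() and the sample value are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Collapse a 32-bit sum into 16 bits, adding any carry above bit 15
 * back into the low half, as md_csum_fold() does.
 */
static uint32_t fold16(uint32_t csum)
{
	csum = (csum & 0xffff) + (csum >> 16);	/* first fold; may still carry */
	return (csum & 0xffff) + (csum >> 16);	/* fold the carry back in */
}

int main(void)
{
	/* 0xffffffff -> 0xffff + 0xffff = 0x1fffe -> 0xfffe + 0x1 = 0xffff */
	printf("%#x\n", fold16(0xffffffffu));
	return 0;
}
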
@@ -680,7 +713,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        if (sb->raid_disks <= 0)
                goto abort;
 
-       if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
+       if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
                printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
                        b);
                goto abort;
@@ -690,6 +723,17 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        rdev->data_offset = 0;
        rdev->sb_size = MD_SB_BYTES;
 
+       if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
+               if (sb->level != 1 && sb->level != 4
+                   && sb->level != 5 && sb->level != 6
+                   && sb->level != 10) {
+                       /* FIXME use a better test */
+                       printk(KERN_WARNING
+                              "md: bitmaps not supported for this level.\n");
+                       goto abort;
+               }
+       }
+
        if (sb->level == LEVEL_MULTIPATH)
                rdev->desc_nr = -1;
        else
@@ -788,16 +832,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->max_disks = MD_SB_DISKS;
 
                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
-                   mddev->bitmap_file == NULL) {
-                       if (mddev->level != 1 && mddev->level != 4
-                           && mddev->level != 5 && mddev->level != 6
-                           && mddev->level != 10) {
-                               /* FIXME use a better test */
-                               printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
-                               return -EINVAL;
-                       }
+                   mddev->bitmap_file == NULL)
                        mddev->bitmap_offset = mddev->default_bitmap_offset;
-               }
 
        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
@@ -970,12 +1006,13 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
  * version 1 superblock
  */
 
-static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
+static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
 {
-       unsigned int disk_csum, csum;
+       __le32 disk_csum;
+       u32 csum;
        unsigned long long newcsum;
        int size = 256 + le32_to_cpu(sb->max_dev)*2;
-       unsigned int *isuper = (unsigned int*)sb;
+       __le32 *isuper = (__le32*)sb;
        int i;
 
        disk_csum = sb->sb_csum;
@@ -985,7 +1022,7 @@ static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
                newcsum += le32_to_cpu(*isuper++);
 
        if (size == 2)
-               newcsum += le16_to_cpu(*(unsigned short*) isuper);
+               newcsum += le16_to_cpu(*(__le16*) isuper);
 
        csum = (newcsum & 0xffffffff) + (newcsum >> 32);
        sb->sb_csum = disk_csum;
@@ -1053,6 +1090,18 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
                       bdevname(rdev->bdev,b));
                return -EINVAL;
        }
+       if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
+               if (sb->level != cpu_to_le32(1) &&
+                   sb->level != cpu_to_le32(4) &&
+                   sb->level != cpu_to_le32(5) &&
+                   sb->level != cpu_to_le32(6) &&
+                   sb->level != cpu_to_le32(10)) {
+                       printk(KERN_WARNING
+                              "md: bitmaps not supported for this level.\n");
+                       return -EINVAL;
+               }
+       }
+
        rdev->preferred_minor = 0xffff;
        rdev->data_offset = le64_to_cpu(sb->data_offset);
        atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
@@ -1102,7 +1151,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
        if (le32_to_cpu(sb->chunksize))
                rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
 
-       if (le32_to_cpu(sb->size) > rdev->size*2)
+       if (le64_to_cpu(sb->size) > rdev->size*2)
                return -EINVAL;
        return ret;
 }
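Aside (not part of the patch): a small sketch of why the le32_to_cpu() -> le64_to_cpu() change above matters for the 64-bit on-disk 'size' field. The byte values are invented, and the memcpy() stands in for the little-endian accessors on a little-endian host.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A 64-bit 'size' of 0x0000000180000000 sectors, stored little-endian */
	unsigned char raw[8] = { 0x00, 0x00, 0x00, 0x80, 0x01, 0x00, 0x00, 0x00 };
	uint64_t size64;
	uint32_t size32;

	memcpy(&size64, raw, sizeof(size64));	/* full 64-bit value */
	memcpy(&size32, raw, sizeof(size32));	/* a 32-bit read drops the high half */

	printf("64-bit: %#llx   32-bit: %#x\n",
	       (unsigned long long)size64, size32);
	return 0;
}
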
@@ -1136,14 +1185,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->max_disks =  (4096-256)/2;
 
                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
-                   mddev->bitmap_file == NULL ) {
-                       if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
-                           && mddev->level != 10) {
-                               printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
-                               return -EINVAL;
-                       }
+                   mddev->bitmap_file == NULL )
                        mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
-               }
+
                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
                        mddev->reshape_position = le64_to_cpu(sb->reshape_position);
                        mddev->delta_disks = le32_to_cpu(sb->delta_disks);
@@ -1224,7 +1268,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        else
                sb->resync_offset = cpu_to_le64(0);
 
-       sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
+       sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
 
        sb->raid_disks = cpu_to_le32(mddev->raid_disks);
        sb->size = cpu_to_le64(mddev->size<<1);
@@ -1254,8 +1298,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        ITERATE_RDEV(mddev,rdev2,tmp)
                if (rdev2->desc_nr+1 > max_dev)
                        max_dev = rdev2->desc_nr+1;
-       
-       sb->max_dev = cpu_to_le32(max_dev);
+
+       if (max_dev > le32_to_cpu(sb->max_dev))
+               sb->max_dev = cpu_to_le32(max_dev);
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);
        
@@ -1291,27 +1336,17 @@ static struct super_type super_types[] = {
                .sync_super     = super_1_sync,
        },
 };
-       
-static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
-{
-       struct list_head *tmp;
-       mdk_rdev_t *rdev;
-
-       ITERATE_RDEV(mddev,rdev,tmp)
-               if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
-                       return rdev;
-
-       return NULL;
-}
 
 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
 {
-       struct list_head *tmp;
-       mdk_rdev_t *rdev;
+       struct list_head *tmp, *tmp2;
+       mdk_rdev_t *rdev, *rdev2;
 
        ITERATE_RDEV(mddev1,rdev,tmp)
-               if (match_dev_unit(mddev2, rdev))
-                       return 1;
+               ITERATE_RDEV(mddev2, rdev2, tmp2)
+                       if (rdev->bdev->bd_contains ==
+                           rdev2->bdev->bd_contains)
+                               return 1;
 
        return 0;
 }
@@ -1320,10 +1355,10 @@ static LIST_HEAD(pending_raid_disks);
 
 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
 {
-       mdk_rdev_t *same_pdev;
-       char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+       char b[BDEVNAME_SIZE];
        struct kobject *ko;
        char *s;
+       int err;
 
        if (rdev->mddev) {
                MD_BUG();
@@ -1331,20 +1366,16 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
        }
        /* make sure rdev->size exceeds mddev->size */
        if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
-               if (mddev->pers)
-                       /* Cannot change size, so fail */
-                       return -ENOSPC;
-               else
+               if (mddev->pers) {
+                       /* Cannot change size, so fail
+                        * If mddev->level <= 0, then we don't care
+                        * about aligning sizes (e.g. linear)
+                        */
+                       if (mddev->level > 0)
+                               return -ENOSPC;
+               } else
                        mddev->size = rdev->size;
        }
-       same_pdev = match_dev_unit(mddev, rdev);
-       if (same_pdev)
-               printk(KERN_WARNING
-                       "%s: WARNING: %s appears to be on the same physical"
-                       " disk as %s. True\n     protection against single-disk"
-                       " failure might be compromised.\n",
-                       mdname(mddev), bdevname(rdev->bdev,b),
-                       bdevname(same_pdev->bdev,b2));
 
        /* Verify rdev->desc_nr is unique.
         * If it is -1, assign a free number, else
@@ -1366,20 +1397,35 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
        while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
                *s = '!';
                        
-       list_add(&rdev->same_set, &mddev->disks);
        rdev->mddev = mddev;
        printk(KERN_INFO "md: bind<%s>\n", b);
 
        rdev->kobj.parent = &mddev->kobj;
-       kobject_add(&rdev->kobj);
+       if ((err = kobject_add(&rdev->kobj)))
+               goto fail;
 
        if (rdev->bdev->bd_part)
                ko = &rdev->bdev->bd_part->kobj;
        else
                ko = &rdev->bdev->bd_disk->kobj;
-       sysfs_create_link(&rdev->kobj, ko, "block");
+       if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
+               kobject_del(&rdev->kobj);
+               goto fail;
+       }
+       list_add(&rdev->same_set, &mddev->disks);
        bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
        return 0;
+
+ fail:
+       printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
+              b, mdname(mddev));
+       return err;
+}
+
+static void delayed_delete(struct work_struct *ws)
+{
+       mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
+       kobject_del(&rdev->kobj);
 }
 
 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
@@ -1394,7 +1440,12 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
        printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
        rdev->mddev = NULL;
        sysfs_remove_link(&rdev->kobj, "block");
-       kobject_del(&rdev->kobj);
+
+       /* We need to delay this, otherwise we can deadlock when
+        * writing 'remove' to "dev/state"
+        */
+       INIT_WORK(&rdev->del_work, delayed_delete);
+       schedule_work(&rdev->del_work);
 }

 
 /*
@@ -1408,7 +1459,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
        struct block_device *bdev;
        char b[BDEVNAME_SIZE];
 
-       bdev = open_partition_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+       bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
        if (IS_ERR(bdev)) {
                printk(KERN_ERR "md: could not open %s.\n",
                        __bdevname(dev, b));
@@ -1418,7 +1469,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
        if (err) {
                printk(KERN_ERR "md: could not bd_claim %s.\n",
                        bdevname(bdev, b));
-               blkdev_put_partition(bdev);
+               blkdev_put(bdev);
                return err;
        }
        rdev->bdev = bdev;
@@ -1432,7 +1483,7 @@ static void unlock_rdev(mdk_rdev_t *rdev)
        if (!bdev)
                MD_BUG();
        bd_release(bdev);
-       blkdev_put_partition(bdev);
+       blkdev_put(bdev);
 }
 
 void md_autodetect_dev(dev_t dev);
@@ -1589,7 +1640,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
 
 static void md_update_sb(mddev_t * mddev, int force_change)
 {
-       int err;
        struct list_head *tmp;
        mdk_rdev_t *rdev;
        int sync_req;
@@ -1628,7 +1678,8 @@ repeat:
         * and 'events' is odd, we can roll back to the previous clean state */
        if (nospares
            && (mddev->in_sync && mddev->recovery_cp == MaxSector)
-           && (mddev->events & 1))
+           && (mddev->events & 1)
+           && mddev->events != 1)
                mddev->events--;
        else {
                /* otherwise we have to go forward and ... */
@@ -1675,7 +1726,7 @@ repeat:
                "md: updating %s RAID superblock on device (in sync %d)\n",
                mdname(mddev),mddev->in_sync);
 
-       err = bitmap_update_sb(mddev->bitmap);
+       bitmap_update_sb(mddev->bitmap);
        ITERATE_RDEV(mddev,rdev,tmp) {
                char b[BDEVNAME_SIZE];
                dprintk(KERN_INFO "md: ");
@@ -1787,7 +1838,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                else {
                        mddev_t *mddev = rdev->mddev;
                        kick_rdev_from_array(rdev);
-                       md_update_sb(mddev, 1);
+                       if (mddev->pers)
+                               md_update_sb(mddev, 1);
                        md_new_event(mddev);
                        err = 0;
                }
@@ -1998,6 +2050,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
        kobject_init(&rdev->kobj);
 
        rdev->desc_nr = -1;
+       rdev->saved_raid_disk = -1;
+       rdev->raid_disk = -1;
        rdev->flags = 0;
        rdev->data_offset = 0;
        rdev->sb_events = 0;
@@ -2018,9 +2072,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
                err = super_types[super_format].
                        load_super(rdev, NULL, super_minor);
                if (err == -EINVAL) {
-                       printk(KERN_WARNING 
-                               "md: %s has invalid sb, not importing!\n",
-                               bdevname(rdev->bdev,b));
+                       printk(KERN_WARNING
+                               "md: %s does not have a valid v%d.%d "
+                              "superblock, not importing!\n",
+                               bdevname(rdev->bdev,b),
+                              super_format, super_minor);
                        goto abort_free;
                }
                if (err < 0) {
@@ -2092,6 +2148,9 @@ static void analyze_sbs(mddev_t * mddev)
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
                        set_bit(In_sync, &rdev->flags);
+               } else if (rdev->raid_disk >= mddev->raid_disks) {
+                       rdev->raid_disk = -1;
+                       clear_bit(In_sync, &rdev->flags);
                }
        }
 
@@ -2193,6 +2252,10 @@ static ssize_t
 layout_show(mddev_t *mddev, char *page)
 {
        /* just a number, not meaningful for all levels */
+       if (mddev->reshape_position != MaxSector &&
+           mddev->layout != mddev->new_layout)
+               return sprintf(page, "%d (%d)\n",
+                              mddev->new_layout, mddev->layout);
        return sprintf(page, "%d\n", mddev->layout);
 }
 
@@ -2201,13 +2264,16 @@ layout_store(mddev_t *mddev, const char *buf, size_t len)
 {
        char *e;
        unsigned long n = simple_strtoul(buf, &e, 10);
-       if (mddev->pers)
-               return -EBUSY;
 
        if (!*buf || (*e && *e != '\n'))
                return -EINVAL;
 
-       mddev->layout = n;
+       if (mddev->pers)
+               return -EBUSY;
+       if (mddev->reshape_position != MaxSector)
+               mddev->new_layout = n;
+       else
+               mddev->layout = n;
        return len;
 }
 static struct md_sysfs_entry md_layout =
@@ -2219,6 +2285,10 @@ raid_disks_show(mddev_t *mddev, char *page)
 {
        if (mddev->raid_disks == 0)
                return 0;
+       if (mddev->reshape_position != MaxSector &&
+           mddev->delta_disks != 0)
+               return sprintf(page, "%d (%d)\n", mddev->raid_disks,
+                              mddev->raid_disks - mddev->delta_disks);
        return sprintf(page, "%d\n", mddev->raid_disks);
 }
 
@@ -2227,7 +2297,6 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks);
 static ssize_t
 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
 {
-       /* can only set raid_disks if array is not yet active */
        char *e;
        int rv = 0;
        unsigned long n = simple_strtoul(buf, &e, 10);
@@ -2237,7 +2306,11 @@ raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
 
        if (mddev->pers)
                rv = update_raid_disks(mddev, n);
-       else
+       else if (mddev->reshape_position != MaxSector) {
+               int olddisks = mddev->raid_disks - mddev->delta_disks;
+               mddev->delta_disks = n - olddisks;
+               mddev->raid_disks = n;
+       } else
                mddev->raid_disks = n;
        return rv ? rv : len;
 }
@@ -2247,6 +2320,10 @@ __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
 static ssize_t
 chunk_size_show(mddev_t *mddev, char *page)
 {
+       if (mddev->reshape_position != MaxSector &&
+           mddev->chunk_size != mddev->new_chunk)
+               return sprintf(page, "%d (%d)\n", mddev->new_chunk,
+                              mddev->chunk_size);
        return sprintf(page, "%d\n", mddev->chunk_size);
 }
 
@@ -2257,12 +2334,15 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
        char *e;
        unsigned long n = simple_strtoul(buf, &e, 10);
 
-       if (mddev->pers)
-               return -EBUSY;
        if (!*buf || (*e && *e != '\n'))
                return -EINVAL;
 
-       mddev->chunk_size = n;
+       if (mddev->pers)
+               return -EBUSY;
+       else if (mddev->reshape_position != MaxSector)
+               mddev->new_chunk = n;
+       else
+               mddev->chunk_size = n;
        return len;
 }
 static struct md_sysfs_entry md_chunk_size =
@@ -2625,10 +2705,9 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
                return -EINVAL;
        buf = e+1;
        minor = simple_strtoul(buf, &e, 10);
-       if (e==buf || *e != '\n')
+       if (e==buf || (*e && *e != '\n') )
                return -EINVAL;
-       if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
-           super_types[major].name == NULL)
+       if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
                return -ENOENT;
        mddev->major_version = major;
        mddev->minor_version = minor;
@@ -2849,6 +2928,37 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry md_suspend_hi =
 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
 
+static ssize_t
+reshape_position_show(mddev_t *mddev, char *page)
+{
+       if (mddev->reshape_position != MaxSector)
+               return sprintf(page, "%llu\n",
+                              (unsigned long long)mddev->reshape_position);
+       strcpy(page, "none\n");
+       return 5;
+}
+
+static ssize_t
+reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       char *e;
+       unsigned long long new = simple_strtoull(buf, &e, 10);
+       if (mddev->pers)
+               return -EBUSY;
+       if (buf == e || (*e && *e != '\n'))
+               return -EINVAL;
+       mddev->reshape_position = new;
+       mddev->delta_disks = 0;
+       mddev->new_level = mddev->level;
+       mddev->new_layout = mddev->layout;
+       mddev->new_chunk = mddev->chunk_size;
+       return len;
+}
+
+static struct md_sysfs_entry md_reshape_position =
+__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
+       reshape_position_store);
+
 
 static struct attribute *md_default_attrs[] = {
        &md_level.attr,
@@ -2861,6 +2971,7 @@ static struct attribute *md_default_attrs[] = {
        &md_new_device.attr,
        &md_safe_delay.attr,
        &md_array_state.attr,
+       &md_reshape_position.attr,
        NULL,
 };
 
@@ -2977,7 +3088,9 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
        mddev->kobj.k_name = NULL;
        snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
        mddev->kobj.ktype = &md_ktype;
-       kobject_register(&mddev->kobj);
+       if (kobject_register(&mddev->kobj))
+               printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
+                      disk->disk_name);
        return NULL;
 }
 
@@ -3062,13 +3175,33 @@ static int do_md_run(mddev_t * mddev)
         * Drop all container device buffers, from now on
         * the only valid external interface is through the md
         * device.
-        * Also find largest hardsector size
         */
        ITERATE_RDEV(mddev,rdev,tmp) {
                if (test_bit(Faulty, &rdev->flags))
                        continue;
                sync_blockdev(rdev->bdev);
-               invalidate_bdev(rdev->bdev, 0);
+               invalidate_bdev(rdev->bdev);
+
+               /* perform some consistency tests on the device.
+                * We don't want the data to overlap the metadata;
+                * internal bitmap issues are handled elsewhere.
+                */
+               if (rdev->data_offset < rdev->sb_offset) {
+                       if (mddev->size &&
+                           rdev->data_offset + mddev->size*2
+                           > rdev->sb_offset*2) {
+                               printk("md: %s: data overlaps metadata\n",
+                                      mdname(mddev));
+                               return -EINVAL;
+                       }
+               } else {
+                       if (rdev->sb_offset*2 + rdev->sb_size/512
+                           > rdev->data_offset) {
+                               printk("md: %s: metadata overlaps data\n",
+                                      mdname(mddev));
+                               return -EINVAL;
+                       }
+               }
        }
 
        md_probe(mddev->unit, NULL, NULL);
@@ -3101,6 +3234,36 @@ static int do_md_run(mddev_t * mddev)
                return -EINVAL;
        }
 
+       if (pers->sync_request) {
+               /* Warn if this is a potentially silly
+                * configuration.
+                */
+               char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+               mdk_rdev_t *rdev2;
+               struct list_head *tmp2;
+               int warned = 0;
+               ITERATE_RDEV(mddev, rdev, tmp) {
+                       ITERATE_RDEV(mddev, rdev2, tmp2) {
+                               if (rdev < rdev2 &&
+                                   rdev->bdev->bd_contains ==
+                                   rdev2->bdev->bd_contains) {
+                                       printk(KERN_WARNING
+                                              "%s: WARNING: %s appears to be"
+                                              " on the same physical disk as"
+                                              " %s.\n",
+                                              mdname(mddev),
+                                              bdevname(rdev->bdev,b),
+                                              bdevname(rdev2->bdev,b2));
+                                       warned = 1;
+                               }
+                       }
+               }
+               if (warned)
+                       printk(KERN_WARNING
+                              "True protection against single-disk"
+                              " failure might be compromised.\n");
+       }
+
        mddev->recovery = 0;
        mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
        mddev->barriers_work = 1;
@@ -3125,9 +3288,12 @@ static int do_md_run(mddev_t * mddev)
                bitmap_destroy(mddev);
                return err;
        }
-       if (mddev->pers->sync_request)
-               sysfs_create_group(&mddev->kobj, &md_redundancy_group);
-       else if (mddev->ro == 2) /* auto-readonly not meaningful */
+       if (mddev->pers->sync_request) {
+               if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+                       printk(KERN_WARNING
+                              "md: cannot register extra attributes for %s\n",
+                              mdname(mddev));
+       } else if (mddev->ro == 2) /* auto-readonly not meaningful */
                mddev->ro = 0;
 
        atomic_set(&mddev->writes_pending,0);
@@ -3141,7 +3307,9 @@ static int do_md_run(mddev_t * mddev)
                if (rdev->raid_disk >= 0) {
                        char nm[20];
                        sprintf(nm, "rd%d", rdev->raid_disk);
-                       sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+                       if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
+                               printk("md: cannot register %s for %s\n",
+                                      nm, mdname(mddev));
                }
        
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -3194,6 +3362,7 @@ static int do_md_run(mddev_t * mddev)
 
        mddev->changed = 1;
        md_new_event(mddev);
+       kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
        return 0;
 }
 
@@ -3302,11 +3471,19 @@ static int do_md_stop(mddev_t * mddev, int mode)
                                set_disk_ro(disk, 0);
                        blk_queue_make_request(mddev->queue, md_fail_request);
                        mddev->pers->stop(mddev);
+                       mddev->queue->merge_bvec_fn = NULL;
+                       mddev->queue->unplug_fn = NULL;
+                       mddev->queue->issue_flush_fn = NULL;
+                       mddev->queue->backing_dev_info.congested_fn = NULL;
                        if (mddev->pers->sync_request)
                                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
 
                        module_put(mddev->pers->owner);
                        mddev->pers = NULL;
+
+                       set_capacity(disk, 0);
+                       mddev->changed = 1;
+
                        if (mddev->ro)
                                mddev->ro = 0;
                }
@@ -3326,7 +3503,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
        if (mode == 0) {
                mdk_rdev_t *rdev;
                struct list_head *tmp;
-               struct gendisk *disk;
+
                printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
 
                bitmap_destroy(mddev);
@@ -3344,17 +3521,17 @@ static int do_md_stop(mddev_t * mddev, int mode)
                                sysfs_remove_link(&mddev->kobj, nm);
                        }
 
+               /* make sure all delayed_delete calls have finished */
+               flush_scheduled_work();
+
                export_array(mddev);
 
                mddev->array_size = 0;
                mddev->size = 0;
                mddev->raid_disks = 0;
                mddev->recovery_cp = 0;
+               mddev->reshape_position = MaxSector;
 
-               disk = mddev->gendisk;
-               if (disk)
-                       set_capacity(disk, 0);
-               mddev->changed = 1;
        } else if (mddev->pers)
                printk(KERN_INFO "md: %s switched to read-only mode.\n",
                        mdname(mddev));
@@ -3364,6 +3541,7 @@ out:
        return err;
 }
 
+#ifndef MODULE
 static void autorun_array(mddev_t *mddev)
 {
        mdk_rdev_t *rdev;
@@ -3478,6 +3656,7 @@ static void autorun_devices(int part)
        }
        printk(KERN_INFO "md: ... autorun DONE.\n");
 }
+#endif /* !MODULE */
 
 static int get_version(void __user * arg)
 {
@@ -3553,6 +3732,8 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg)
        char *ptr, *buf = NULL;
        int err = -ENOMEM;
 
+       md_allow_write(mddev);
+
        file = kmalloc(sizeof(*file), GFP_KERNEL);
        if (!file)
                goto out;
@@ -3715,6 +3896,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                if (err)
                        export_rdev(rdev);
 
+               md_update_sb(mddev, 1);
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
                return err;
@@ -3845,6 +4027,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
        }
        clear_bit(In_sync, &rdev->flags);
        rdev->desc_nr = -1;
+       rdev->saved_raid_disk = -1;
        err = bind_rdev_to_array(rdev, mddev);
        if (err)
                goto abort_export;
@@ -3958,7 +4141,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
        if (info->raid_disks == 0) {
                /* just setting version number for superblock loading */
                if (info->major_version < 0 ||
-                   info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
+                   info->major_version >= ARRAY_SIZE(super_types) ||
                    super_types[info->major_version].name == NULL) {
                        /* maybe try to auto-load a module? */
                        printk(KERN_INFO 
@@ -3969,6 +4152,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
                mddev->major_version = info->major_version;
                mddev->minor_version = info->minor_version;
                mddev->patch_version = info->patch_version;
+               mddev->persistent = !info->not_persistent;
                return 0;
        }
        mddev->major_version = MD_MAJOR_VERSION;
@@ -4038,11 +4222,8 @@ static int update_size(mddev_t *mddev, unsigned long size)
                return -EBUSY;
        ITERATE_RDEV(mddev,rdev,tmp) {
                sector_t avail;
-               if (rdev->sb_offset > rdev->data_offset)
-                       avail = (rdev->sb_offset*2) - rdev->data_offset;
-               else
-                       avail = get_capacity(rdev->bdev->bd_disk)
-                               - rdev->data_offset;
+               avail = rdev->size * 2;
+
                if (fit && (size == 0 || size > avail/2))
                        size = avail/2;
                if (avail < ((sector_t)size << 1))
@@ -4296,9 +4477,10 @@ static int md_ioctl(struct inode *inode, struct file *file,
         * Commands querying/configuring an existing array:
         */
        /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
-        * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
+        * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
        if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
-                       && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
+                       && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
+                       && cmd != GET_BITMAP_FILE) {
                err = -ENODEV;
                goto abort_unlock;
        }
@@ -4418,7 +4600,7 @@ static int md_open(struct inode *inode, struct file *file)
        mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
        int err;
 
-       if ((err = mddev_lock(mddev)))
+       if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
                goto out;
 
        err = 0;
@@ -4434,8 +4616,7 @@ static int md_release(struct inode *inode, struct file * file)
 {
        mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
 
-       if (!mddev)
-               BUG();
+       BUG_ON(!mddev);
        mddev_put(mddev);
 
        return 0;
@@ -4498,7 +4679,6 @@ static int md_thread(void * arg)
                         test_bit(THREAD_WAKEUP, &thread->flags)
                         || kthread_should_stop(),
                         thread->timeout);
-               try_to_freeze();
 
                clear_bit(THREAD_WAKEUP, &thread->flags);
 
@@ -4842,8 +5022,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
                                chunk_kb ? "KB" : "B");
                        if (bitmap->file) {
                                seq_printf(seq, ", file: ");
-                               seq_path(seq, bitmap->file->f_vfsmnt,
-                                        bitmap->file->f_dentry," \t\n");
+                               seq_path(seq, bitmap->file->f_path.mnt,
+                                        bitmap->file->f_path.dentry," \t\n");
                        }
 
                        seq_printf(seq, "\n");
@@ -4882,15 +5062,6 @@ static int md_seq_open(struct inode *inode, struct file *file)
        return error;
 }
 
-static int md_seq_release(struct inode *inode, struct file *file)
-{
-       struct seq_file *m = file->private_data;
-       struct mdstat_info *mi = m->private;
-       m->private = NULL;
-       kfree(mi);
-       return seq_release(inode, file);
-}
-
 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
 {
        struct seq_file *m = filp->private_data;
@@ -4907,11 +5078,12 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
        return mask;
 }
 
-static struct file_operations md_seq_fops = {
+static const struct file_operations md_seq_fops = {
+       .owner          = THIS_MODULE,
        .open           = md_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = md_seq_release,
+       .release        = seq_release_private,
        .poll           = mdstat_poll,
 };
 
@@ -4938,7 +5110,7 @@ static int is_mddev_idle(mddev_t *mddev)
        mdk_rdev_t * rdev;
        struct list_head *tmp;
        int idle;
-       unsigned long curr_events;
+       long curr_events;
 
        idle = 1;
        ITERATE_RDEV(mddev,rdev,tmp) {
@@ -4946,20 +5118,29 @@ static int is_mddev_idle(mddev_t *mddev)
                curr_events = disk_stat_read(disk, sectors[0]) + 
                                disk_stat_read(disk, sectors[1]) - 
                                atomic_read(&disk->sync_io);
-               /* The difference between curr_events and last_events
-                * will be affected by any new non-sync IO (making
-                * curr_events bigger) and any difference in the amount of
-                * in-flight syncio (making current_events bigger or smaller)
-                * The amount in-flight is currently limited to
-                * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
-                * which is at most 4096 sectors.
-                * These numbers are fairly fragile and should be made
-                * more robust, probably by enforcing the
-                * 'window size' that md_do_sync sort-of uses.
+               /* sync IO will cause sync_io to increase before the disk_stats
+                * as sync_io is counted when a request starts, and
+                * disk_stats is counted when it completes.
+                * So resync activity will cause curr_events to be smaller than
+                * when there was no such activity.
+                * non-sync IO will cause disk_stat to increase without
+                * increasing sync_io so curr_events will (eventually)
+                * be larger than it was before.  Once it becomes
+                * substantially larger, the test below will cause
+                * the array to appear non-idle, and resync will slow
+                * down.
+                * If there is a lot of outstanding resync activity when
+                * we set last_event to curr_events, then all that activity
+                * completing might cause the array to appear non-idle
+                * and resync will be slowed down even though there might
+                * not have been non-resync activity.  This will only
+                * happen once though.  'last_events' will soon reflect
+                * the state where there is little or no outstanding
+                * resync requests, and further resync activity will
+                * always make curr_events less than last_events.
                 *
-                * Note: the following is an unsigned comparison.
                 */
-               if ((curr_events - rdev->last_events + 4096) > 8192) {
+               if (curr_events - rdev->last_events > 4096) {
                        rdev->last_events = curr_events;
                        idle = 0;
                }
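Aside (not part of the patch): a tiny sketch of why curr_events is now a signed long. When outstanding resync I/O completes, curr_events can dip below last_events; with unsigned arithmetic that difference wraps to a huge value, which is what the old '+ 4096 > 8192' window worked around. The numbers are made up.

#include <stdio.h>

int main(void)
{
	long curr = 9000, last = 10000;			/* resync requests just drained */
	unsigned long ucurr = 9000, ulast = 10000;

	printf("signed diff:   %ld\n", curr - last);	/* -1000: under 4096, still idle */
	printf("unsigned diff: %lu\n", ucurr - ulast);	/* wraps to a huge positive value */
	return 0;
}
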
@@ -5020,6 +5201,33 @@ void md_write_end(mddev_t *mddev)
        }
 }
 
+/* md_allow_write(mddev)
+ * Calling this ensures that the array is marked 'active' so that writes
+ * may proceed without blocking.  It is important to call this before
+ * attempting a GFP_KERNEL allocation while holding the mddev lock.
+ * Must be called with mddev_lock held.
+ */
+void md_allow_write(mddev_t *mddev)
+{
+       if (!mddev->pers)
+               return;
+       if (mddev->ro)
+               return;
+
+       spin_lock_irq(&mddev->write_lock);
+       if (mddev->in_sync) {
+               mddev->in_sync = 0;
+               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               if (mddev->safemode_delay &&
+                   mddev->safemode == 0)
+                       mddev->safemode = 1;
+               spin_unlock_irq(&mddev->write_lock);
+               md_update_sb(mddev, 0);
+       } else
+               spin_unlock_irq(&mddev->write_lock);
+}
+EXPORT_SYMBOL_GPL(md_allow_write);
+
 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 
 #define SYNC_MARKS     10
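Aside (not part of the patch): a hypothetical caller sketching the pattern the md_allow_write() comment describes, mirroring the call added to get_bitmap_file() earlier in this diff; demo_store() and its allocation are illustrative only.

static ssize_t demo_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *scratch;

	/* Mark the array active first, so the GFP_KERNEL allocation below
	 * can block (and trigger writeback to this array) without waiting
	 * on a superblock update that needs the mddev lock we hold.
	 */
	md_allow_write(mddev);
	scratch = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;
	/* ... use the buffer under mddev_lock ... */
	kfree(scratch);
	return len;
}
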
@@ -5268,7 +5476,6 @@ void md_do_sync(mddev_t *mddev)
        mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
 
        if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
-           test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
            mddev->curr_resync > 2) {
                if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -5292,6 +5499,7 @@ void md_do_sync(mddev_t *mddev)
                                        rdev->recovery_offset = mddev->curr_resync;
                }
        }
+       set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
  skip:
        mddev->curr_resync = 0;
@@ -5302,6 +5510,48 @@ void md_do_sync(mddev_t *mddev)
 EXPORT_SYMBOL_GPL(md_do_sync);
 
 
+static int remove_and_add_spares(mddev_t *mddev)
+{
+       mdk_rdev_t *rdev;
+       struct list_head *rtmp;
+       int spares = 0;
+
+       ITERATE_RDEV(mddev,rdev,rtmp)
+               if (rdev->raid_disk >= 0 &&
+                   (test_bit(Faulty, &rdev->flags) ||
+                    ! test_bit(In_sync, &rdev->flags)) &&
+                   atomic_read(&rdev->nr_pending)==0) {
+                       if (mddev->pers->hot_remove_disk(
+                                   mddev, rdev->raid_disk)==0) {
+                               char nm[20];
+                               sprintf(nm,"rd%d", rdev->raid_disk);
+                               sysfs_remove_link(&mddev->kobj, nm);
+                               rdev->raid_disk = -1;
+                       }
+               }
+
+       if (mddev->degraded) {
+               ITERATE_RDEV(mddev,rdev,rtmp)
+                       if (rdev->raid_disk < 0
+                           && !test_bit(Faulty, &rdev->flags)) {
+                               rdev->recovery_offset = 0;
+                               if (mddev->pers->hot_add_disk(mddev,rdev)) {
+                                       char nm[20];
+                                       sprintf(nm, "rd%d", rdev->raid_disk);
+                                       if (sysfs_create_link(&mddev->kobj,
+                                                             &rdev->kobj, nm))
+                                               printk(KERN_WARNING
+                                                      "md: cannot register "
+                                                      "%s for %s\n",
+                                                      nm, mdname(mddev));
+                                       spares++;
+                                       md_new_event(mddev);
+                               } else
+                                       break;
+                       }
+       }
+       return spares;
+}
 /*
  * This routine is regularly called by all per-raid-array threads to
  * deal with generic issues like resync and super-block update.
@@ -5356,7 +5606,7 @@ void md_check_recovery(mddev_t *mddev)
                return;
 
        if (mddev_trylock(mddev)) {
-               int spares =0;
+               int spares = 0;
 
                spin_lock_irq(&mddev->write_lock);
                if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
@@ -5419,35 +5669,13 @@ void md_check_recovery(mddev_t *mddev)
                 * Spare are also removed and re-added, to allow
                 * the personality to fail the re-add.
                 */
-               ITERATE_RDEV(mddev,rdev,rtmp)
-                       if (rdev->raid_disk >= 0 &&
-                           (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
-                           atomic_read(&rdev->nr_pending)==0) {
-                               if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
-                                       char nm[20];
-                                       sprintf(nm,"rd%d", rdev->raid_disk);
-                                       sysfs_remove_link(&mddev->kobj, nm);
-                                       rdev->raid_disk = -1;
-                               }
-                       }
 
-               if (mddev->degraded) {
-                       ITERATE_RDEV(mddev,rdev,rtmp)
-                               if (rdev->raid_disk < 0
-                                   && !test_bit(Faulty, &rdev->flags)) {
-                                       rdev->recovery_offset = 0;
-                                       if (mddev->pers->hot_add_disk(mddev,rdev)) {
-                                               char nm[20];
-                                               sprintf(nm, "rd%d", rdev->raid_disk);
-                                               sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
-                                               spares++;
-                                               md_new_event(mddev);
-                                       } else
-                                               break;
-                               }
-               }
-
-               if (spares) {
+               if (mddev->reshape_position != MaxSector) {
+                       if (mddev->pers->check_reshape(mddev) != 0)
+                               /* Cannot proceed */
+                               goto unlock;
+                       set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+               } else if ((spares = remove_and_add_spares(mddev))) {
                        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                } else if (mddev->recovery_cp < MaxSector) {
@@ -5540,7 +5768,7 @@ static int __init md_init(void)
                            md_probe, NULL, NULL);
 
        register_reboot_notifier(&md_notifier);
-       raid_table_header = register_sysctl_table(raid_root_table, 1);
+       raid_table_header = register_sysctl_table(raid_root_table);
 
        md_geninit();
        return (0);
@@ -5573,7 +5801,7 @@ static void autostart_arrays(int part)
        for (i = 0; i < dev_cnt; i++) {
                dev_t dev = detected_devices[i];
 
-               rdev = md_import_device(dev,0, 0);
+               rdev = md_import_device(dev,0, 90);
                if (IS_ERR(rdev))
                        continue;
 
@@ -5588,7 +5816,7 @@ static void autostart_arrays(int part)
        autorun_devices(part);
 }
 
-#endif
+#endif /* !MODULE */
 
 static __exit void md_exit(void)
 {
@@ -5615,7 +5843,7 @@ static __exit void md_exit(void)
        }
 }
 
-module_init(md_init)
+subsys_initcall(md_init);
 module_exit(md_exit)
 
 static int get_ro(char *buffer, struct kernel_param *kp)