md: move offset, daemon_sleep and chunksize out of bitmap structure
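The patch below collects the bitmap-related fields of mddev_t (bitmap file, on-device offset, chunk size, daemon sleep interval, write-behind limit) into a single bitmap_info sub-structure, so md.c now refers to mddev->bitmap_info.* instead of separate mddev->bitmap_* members. As a rough guide while reading the diff, here is a sketch of what that sub-structure presumably looks like inside struct mddev_s in drivers/md/md.h, reconstructed only from the fields this file touches; the types and comments are assumptions, and the real definition (which this diff does not show) may differ.

	/* Sketch only -- reconstructed from the accesses in this diff,
	 * not copied from drivers/md/md.h; field types are assumptions.
	 */
	struct {
		struct file	*file;		/* bitmap file, if externally stored */
		loff_t		offset;		/* offset from superblock to bitmap, in sectors */
		loff_t		default_offset;	/* offset to use when none is specified */
		struct mutex	mutex;		/* serialises bitmap reconfiguration */
		unsigned long	chunksize;	/* bytes of array data per bitmap bit */
		unsigned long	daemon_sleep;	/* delay between bitmap daemon passes */
		unsigned long	max_write_behind; /* write-behind limit for WriteMostly devices */
	} bitmap_info;

With this layout, accesses such as mddev->bitmap_offset become mddev->bitmap_info.offset throughout the file, as the hunks below show.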
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2166af8..c56c64d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -98,47 +98,43 @@ static struct ctl_table_header *raid_table_header;
 
 static ctl_table raid_table[] = {
        {
-               .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
-               .proc_handler   = &proc_dointvec,
+               .proc_handler   = proc_dointvec,
        },
        {
-               .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
-               .proc_handler   = &proc_dointvec,
+               .proc_handler   = proc_dointvec,
        },
-       { .ctl_name = 0 }
+       { }
 };
 
 static ctl_table raid_dir_table[] = {
        {
-               .ctl_name       = DEV_RAID,
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = S_IRUGO|S_IXUGO,
                .child          = raid_table,
        },
-       { .ctl_name = 0 }
+       { }
 };
 
 static ctl_table raid_root_table[] = {
        {
-               .ctl_name       = CTL_DEV,
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
-       { .ctl_name = 0 }
+       {  }
 };
 
-static struct block_device_operations md_fops;
+static const struct block_device_operations md_fops;
 
 static int start_readonly;
 
@@ -217,12 +213,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
                return 0;
        }
        rcu_read_lock();
-       if (mddev->suspended) {
+       if (mddev->suspended || mddev->barrier) {
                DEFINE_WAIT(__wait);
                for (;;) {
                        prepare_to_wait(&mddev->sb_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
-                       if (!mddev->suspended)
+                       if (!mddev->suspended && !mddev->barrier)
                                break;
                        rcu_read_unlock();
                        schedule();
@@ -262,6 +258,112 @@ static void mddev_resume(mddev_t *mddev)
        mddev->pers->quiesce(mddev, 0);
 }
 
+int mddev_congested(mddev_t *mddev, int bits)
+{
+       if (mddev->barrier)
+               return 1;
+       return mddev->suspended;
+}
+EXPORT_SYMBOL(mddev_congested);
+
+/*
+ * Generic barrier handling for md
+ */
+
+#define POST_REQUEST_BARRIER ((void*)1)
+
+static void md_end_barrier(struct bio *bio, int err)
+{
+       mdk_rdev_t *rdev = bio->bi_private;
+       mddev_t *mddev = rdev->mddev;
+       if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
+               set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);
+
+       rdev_dec_pending(rdev, mddev);
+
+       if (atomic_dec_and_test(&mddev->flush_pending)) {
+               if (mddev->barrier == POST_REQUEST_BARRIER) {
+                       /* This was a post-request barrier */
+                       mddev->barrier = NULL;
+                       wake_up(&mddev->sb_wait);
+               } else
+                       /* The pre-request barrier has finished */
+                       schedule_work(&mddev->barrier_work);
+       }
+       bio_put(bio);
+}
+
+static void submit_barriers(mddev_t *mddev)
+{
+       mdk_rdev_t *rdev;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+               if (rdev->raid_disk >= 0 &&
+                   !test_bit(Faulty, &rdev->flags)) {
+                       /* Take two references, one is dropped
+                        * when request finishes, one after
+                        * we reclaim rcu_read_lock
+                        */
+                       struct bio *bi;
+                       atomic_inc(&rdev->nr_pending);
+                       atomic_inc(&rdev->nr_pending);
+                       rcu_read_unlock();
+                       bi = bio_alloc(GFP_KERNEL, 0);
+                       bi->bi_end_io = md_end_barrier;
+                       bi->bi_private = rdev;
+                       bi->bi_bdev = rdev->bdev;
+                       atomic_inc(&mddev->flush_pending);
+                       submit_bio(WRITE_BARRIER, bi);
+                       rcu_read_lock();
+                       rdev_dec_pending(rdev, mddev);
+               }
+       rcu_read_unlock();
+}
+
+static void md_submit_barrier(struct work_struct *ws)
+{
+       mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
+       struct bio *bio = mddev->barrier;
+
+       atomic_set(&mddev->flush_pending, 1);
+
+       if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
+               bio_endio(bio, -EOPNOTSUPP);
+       else if (bio->bi_size == 0)
+               /* an empty barrier - all done */
+               bio_endio(bio, 0);
+       else {
+               bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+               if (mddev->pers->make_request(mddev->queue, bio))
+                       generic_make_request(bio);
+               mddev->barrier = POST_REQUEST_BARRIER;
+               submit_barriers(mddev);
+       }
+       if (atomic_dec_and_test(&mddev->flush_pending)) {
+               mddev->barrier = NULL;
+               wake_up(&mddev->sb_wait);
+       }
+}
+
+void md_barrier_request(mddev_t *mddev, struct bio *bio)
+{
+       spin_lock_irq(&mddev->write_lock);
+       wait_event_lock_irq(mddev->sb_wait,
+                           !mddev->barrier,
+                           mddev->write_lock, /*nothing*/);
+       mddev->barrier = bio;
+       spin_unlock_irq(&mddev->write_lock);
+
+       atomic_set(&mddev->flush_pending, 1);
+       INIT_WORK(&mddev->barrier_work, md_submit_barrier);
+
+       submit_barriers(mddev);
+
+       if (atomic_dec_and_test(&mddev->flush_pending))
+               schedule_work(&mddev->barrier_work);
+}
+EXPORT_SYMBOL(md_barrier_request);
 
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -359,7 +461,9 @@ static mddev_t * mddev_find(dev_t unit)
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;
 
+       mutex_init(&new->open_mutex);
        mutex_init(&new->reconfig_mutex);
+       mutex_init(&new->bitmap_info.mutex);
        INIT_LIST_HEAD(&new->disks);
        INIT_LIST_HEAD(&new->all_mddevs);
        init_timer(&new->safemode_timer);
@@ -367,6 +471,7 @@ static mddev_t * mddev_find(dev_t unit)
        atomic_set(&new->openers, 0);
        atomic_set(&new->active_io, 0);
        spin_lock_init(&new->write_lock);
+       atomic_set(&new->flush_pending, 0);
        init_waitqueue_head(&new->sb_wait);
        init_waitqueue_head(&new->recovery_wait);
        new->reshape_position = MaxSector;
@@ -745,7 +850,7 @@ struct super_type  {
  */
 int md_check_no_bitmap(mddev_t *mddev)
 {
-       if (!mddev->bitmap_file && !mddev->bitmap_offset)
+       if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
                return 0;
        printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
                mdname(mddev), mddev->pers->name);
@@ -873,8 +978,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->raid_disks = sb->raid_disks;
                mddev->dev_sectors = sb->size * 2;
                mddev->events = ev1;
-               mddev->bitmap_offset = 0;
-               mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
+               mddev->bitmap_info.offset = 0;
+               mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
 
                if (mddev->minor_version >= 91) {
                        mddev->reshape_position = sb->reshape_position;
@@ -908,8 +1013,9 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->max_disks = MD_SB_DISKS;
 
                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
-                   mddev->bitmap_file == NULL)
-                       mddev->bitmap_offset = mddev->default_bitmap_offset;
+                   mddev->bitmap_info.file == NULL)
+                       mddev->bitmap_info.offset =
+                               mddev->bitmap_info.default_offset;
 
        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
@@ -937,6 +1043,14 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                            desc->raid_disk < mddev->raid_disks */) {
                        set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = desc->raid_disk;
+               } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
+                       /* active but not in sync implies recovery up to
+                        * reshape position.  We don't know exactly where
+                        * that is, so set to zero for now */
+                       if (mddev->minor_version >= 91) {
+                               rdev->recovery_offset = 0;
+                               rdev->raid_disk = desc->raid_disk;
+                       }
                }
                if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
@@ -1018,15 +1132,26 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->layout = mddev->layout;
        sb->chunk_size = mddev->chunk_sectors << 9;
 
-       if (mddev->bitmap && mddev->bitmap_file == NULL)
+       if (mddev->bitmap && mddev->bitmap_info.file == NULL)
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);
 
        sb->disks[0].state = (1<<MD_DISK_REMOVED);
        list_for_each_entry(rdev2, &mddev->disks, same_set) {
                mdp_disk_t *d;
                int desc_nr;
-               if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
-                   && !test_bit(Faulty, &rdev2->flags))
+               int is_active = test_bit(In_sync, &rdev2->flags);
+
+               if (rdev2->raid_disk >= 0 &&
+                   sb->minor_version >= 91)
+                       /* we have nowhere to store the recovery_offset,
+                        * but if it is not below the reshape_position,
+                        * we can piggy-back on that.
+                        */
+                       is_active = 1;
+               if (rdev2->raid_disk < 0 ||
+                   test_bit(Faulty, &rdev2->flags))
+                       is_active = 0;
+               if (is_active)
                        desc_nr = rdev2->raid_disk;
                else
                        desc_nr = next_spare++;
@@ -1036,16 +1161,16 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
-               if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
-                   && !test_bit(Faulty, &rdev2->flags))
+               if (is_active)
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
                if (test_bit(Faulty, &rdev2->flags))
                        d->state = (1<<MD_DISK_FAULTY);
-               else if (test_bit(In_sync, &rdev2->flags)) {
+               else if (is_active) {
                        d->state = (1<<MD_DISK_ACTIVE);
-                       d->state |= (1<<MD_DISK_SYNC);
+                       if (test_bit(In_sync, &rdev2->flags))
+                               d->state |= (1<<MD_DISK_SYNC);
                        active++;
                        working++;
                } else {
@@ -1085,7 +1210,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 {
        if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
                return 0; /* component must fit device */
-       if (rdev->mddev->bitmap_offset)
+       if (rdev->mddev->bitmap_info.offset)
                return 0; /* can't move bitmap */
        rdev->sb_start = calc_dev_sboffset(rdev->bdev);
        if (!num_sectors || num_sectors > rdev->sb_start)
@@ -1264,8 +1389,8 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->raid_disks = le32_to_cpu(sb->raid_disks);
                mddev->dev_sectors = le64_to_cpu(sb->size);
                mddev->events = ev1;
-               mddev->bitmap_offset = 0;
-               mddev->default_bitmap_offset = 1024 >> 9;
+               mddev->bitmap_info.offset = 0;
+               mddev->bitmap_info.default_offset = 1024 >> 9;
                
                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);
@@ -1273,8 +1398,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->max_disks =  (4096-256)/2;
 
                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
-                   mddev->bitmap_file == NULL )
-                       mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
+                   mddev->bitmap_info.file == NULL )
+                       mddev->bitmap_info.offset =
+                               (__s32)le32_to_cpu(sb->bitmap_offset);
 
                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
                        mddev->reshape_position = le64_to_cpu(sb->reshape_position);
@@ -1308,7 +1434,12 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
        }
        if (mddev->level != LEVEL_MULTIPATH) {
                int role;
-               role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
+               if (rdev->desc_nr < 0 ||
+                   rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
+                       role = 0xffff;
+                       rdev->desc_nr = -1;
+               } else
+                       role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
                switch(role) {
                case 0xffff: /* spare */
                        break;
@@ -1363,15 +1494,13 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->level = cpu_to_le32(mddev->level);
        sb->layout = cpu_to_le32(mddev->layout);
 
-       if (mddev->bitmap && mddev->bitmap_file == NULL) {
-               sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
+       if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
+               sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
                sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
        }
 
        if (rdev->raid_disk >= 0 &&
            !test_bit(In_sync, &rdev->flags)) {
-               if (mddev->curr_resync_completed > rdev->recovery_offset)
-                       rdev->recovery_offset = mddev->curr_resync_completed;
                if (rdev->recovery_offset > 0) {
                        sb->feature_map |=
                                cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
@@ -1394,8 +1523,14 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                if (rdev2->desc_nr+1 > max_dev)
                        max_dev = rdev2->desc_nr+1;
 
-       if (max_dev > le32_to_cpu(sb->max_dev))
+       if (max_dev > le32_to_cpu(sb->max_dev)) {
+               int bmask;
                sb->max_dev = cpu_to_le32(max_dev);
+               rdev->sb_size = max_dev * 2 + 256;
+               bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
+               if (rdev->sb_size & bmask)
+                       rdev->sb_size = (rdev->sb_size | bmask) + 1;
+       }
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);
        
@@ -1427,7 +1562,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
                max_sectors -= rdev->data_offset;
                if (!num_sectors || num_sectors > max_sectors)
                        num_sectors = max_sectors;
-       } else if (rdev->mddev->bitmap_offset) {
+       } else if (rdev->mddev->bitmap_info.offset) {
                /* minor version 0 with bitmap we can't move */
                return 0;
        } else {
@@ -1487,37 +1622,76 @@ static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
 
 static LIST_HEAD(pending_raid_disks);
 
-static void md_integrity_check(mdk_rdev_t *rdev, mddev_t *mddev)
+/*
+ * Try to register data integrity profile for an mddev
+ *
+ * This is called when an array is started and after a disk has been kicked
+ * from the array. It only succeeds if all working and active component devices
+ * are integrity capable with matching profiles.
+ */
+int md_integrity_register(mddev_t *mddev)
+{
+       mdk_rdev_t *rdev, *reference = NULL;
+
+       if (list_empty(&mddev->disks))
+               return 0; /* nothing to do */
+       if (blk_get_integrity(mddev->gendisk))
+               return 0; /* already registered */
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
+               /* skip spares and non-functional disks */
+               if (test_bit(Faulty, &rdev->flags))
+                       continue;
+               if (rdev->raid_disk < 0)
+                       continue;
+               /*
+                * If at least one rdev is not integrity capable, we can not
+                * enable data integrity for the md device.
+                */
+               if (!bdev_get_integrity(rdev->bdev))
+                       return -EINVAL;
+               if (!reference) {
+                       /* Use the first rdev as the reference */
+                       reference = rdev;
+                       continue;
+               }
+               /* does this rdev's profile match the reference profile? */
+               if (blk_integrity_compare(reference->bdev->bd_disk,
+                               rdev->bdev->bd_disk) < 0)
+                       return -EINVAL;
+       }
+       /*
+        * All component devices are integrity capable and have matching
+        * profiles, register the common profile for the md device.
+        */
+       if (blk_integrity_register(mddev->gendisk,
+                       bdev_get_integrity(reference->bdev)) != 0) {
+               printk(KERN_ERR "md: failed to register integrity for %s\n",
+                       mdname(mddev));
+               return -EINVAL;
+       }
+       printk(KERN_NOTICE "md: data integrity on %s enabled\n",
+               mdname(mddev));
+       return 0;
+}
+EXPORT_SYMBOL(md_integrity_register);
+
+/* Disable data integrity if non-capable/non-matching disk is being added */
+void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
 {
-       struct mdk_personality *pers = mddev->pers;
-       struct gendisk *disk = mddev->gendisk;
        struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
-       struct blk_integrity *bi_mddev = blk_get_integrity(disk);
+       struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
 
-       /* Data integrity passthrough not supported on RAID 4, 5 and 6 */
-       if (pers && pers->level >= 4 && pers->level <= 6)
+       if (!bi_mddev) /* nothing to do */
                return;
-
-       /* If rdev is integrity capable, register profile for mddev */
-       if (!bi_mddev && bi_rdev) {
-               if (blk_integrity_register(disk, bi_rdev))
-                       printk(KERN_ERR "%s: %s Could not register integrity!\n",
-                              __func__, disk->disk_name);
-               else
-                       printk(KERN_NOTICE "Enabling data integrity on %s\n",
-                              disk->disk_name);
+       if (rdev->raid_disk < 0) /* skip spares */
                return;
-       }
-
-       /* Check that mddev and rdev have matching profiles */
-       if (blk_integrity_compare(disk, rdev->bdev->bd_disk) < 0) {
-               printk(KERN_ERR "%s: %s/%s integrity mismatch!\n", __func__,
-                      disk->disk_name, rdev->bdev->bd_disk->disk_name);
-               printk(KERN_NOTICE "Disabling data integrity on %s\n",
-                      disk->disk_name);
-               blk_integrity_unregister(disk);
-       }
+       if (bi_rdev && blk_integrity_compare(mddev->gendisk,
+                                            rdev->bdev->bd_disk) >= 0)
+               return;
+       printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
+       blk_integrity_unregister(mddev->gendisk);
 }
+EXPORT_SYMBOL(md_integrity_add_rdev);
 
 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
 {
@@ -1591,7 +1765,6 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
        /* May as well allow recovery to be retried once */
        mddev->recovery_disabled = 0;
 
-       md_integrity_check(rdev, mddev);
        return 0;
 
  fail:
@@ -1756,9 +1929,10 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
        __u8 *uuid;
 
        uuid = sb->set_uuid;
-       printk(KERN_INFO "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
-                       ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
-              KERN_INFO "md:    Name: \"%s\" CT:%llu\n",
+       printk(KERN_INFO
+              "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
+              ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
+              "md:    Name: \"%s\" CT:%llu\n",
                le32_to_cpu(sb->major_version),
                le32_to_cpu(sb->feature_map),
                uuid[0], uuid[1], uuid[2], uuid[3],
@@ -1770,12 +1944,13 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
                       & MD_SUPERBLOCK_1_TIME_SEC_MASK);
 
        uuid = sb->device_uuid;
-       printk(KERN_INFO "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
+       printk(KERN_INFO
+              "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
                        " RO:%llu\n"
-              KERN_INFO "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
-                       ":%02x%02x%02x%02x%02x%02x\n"
-              KERN_INFO "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
-              KERN_INFO "md:         (MaxDev:%u) \n",
+              "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+                       ":%02x%02x%02x%02x%02x%02x\n"
+              "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
+              "md:         (MaxDev:%u) \n",
                le32_to_cpu(sb->level),
                (unsigned long long)le64_to_cpu(sb->size),
                le32_to_cpu(sb->raid_disks),
@@ -1859,6 +2034,14 @@ static void sync_sbs(mddev_t * mddev, int nospares)
         */
        mdk_rdev_t *rdev;
 
+       /* First make sure individual recovery_offsets are correct */
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
+               if (rdev->raid_disk >= 0 &&
+                   !test_bit(In_sync, &rdev->flags) &&
+                   mddev->curr_resync_completed > rdev->recovery_offset)
+                               rdev->recovery_offset = mddev->curr_resync_completed;
+
+       }       
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                if (rdev->sb_events == mddev->events ||
                    (nospares &&
@@ -1923,17 +2106,14 @@ repeat:
                /* otherwise we have to go forward and ... */
                mddev->events ++;
                if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
-                       /* .. if the array isn't clean, insist on an odd 'events' */
-                       if ((mddev->events&1)==0) {
-                               mddev->events++;
+                       /* .. if the array isn't clean, an 'even' event must also go
+                        * to spares. */
+                       if ((mddev->events&1)==0)
                                nospares = 0;
-                       }
                } else {
-                       /* otherwise insist on an even 'events' (for clean states) */
-                       if ((mddev->events&1)) {
-                               mddev->events++;
+                       /* otherwise an 'odd' event must go to spares */
+                       if ((mddev->events&1))
                                nospares = 0;
-                       }
                }
        }
 
@@ -2576,7 +2756,7 @@ static void analyze_sbs(mddev_t * mddev)
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
                        set_bit(In_sync, &rdev->flags);
-               } else if (rdev->raid_disk >= mddev->raid_disks) {
+               } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
                        rdev->raid_disk = -1;
                        clear_bit(In_sync, &rdev->flags);
                }
@@ -2655,6 +2835,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
        ssize_t rv = len;
        struct mdk_personality *pers;
        void *priv;
+       mdk_rdev_t *rdev;
 
        if (mddev->pers == NULL) {
                if (len == 0)
@@ -2734,6 +2915,12 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
        mddev_suspend(mddev);
        mddev->pers->stop(mddev);
        module_put(mddev->pers->owner);
+       /* Invalidate devices that are now superfluous */
+       list_for_each_entry(rdev, &mddev->disks, same_set)
+               if (rdev->raid_disk >= mddev->raid_disks) {
+                       rdev->raid_disk = -1;
+                       clear_bit(In_sync, &rdev->flags);
+               }
        mddev->pers = pers;
        mddev->private = priv;
        strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
@@ -3543,6 +3730,7 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
                if (max < mddev->resync_min)
                        return -EINVAL;
                if (max < mddev->resync_max &&
+                   mddev->ro == 0 &&
                    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                        return -EBUSY;
 
@@ -3683,17 +3871,8 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
 
        mddev->array_sectors = sectors;
        set_capacity(mddev->gendisk, mddev->array_sectors);
-       if (mddev->pers) {
-               struct block_device *bdev = bdget_disk(mddev->gendisk, 0);
-
-               if (bdev) {
-                       mutex_lock(&bdev->bd_inode->i_mutex);
-                       i_size_write(bdev->bd_inode,
-                                    (loff_t)mddev->array_sectors << 9);
-                       mutex_unlock(&bdev->bd_inode->i_mutex);
-                       bdput(bdev);
-               }
-       }
+       if (mddev->pers)
+               revalidate_disk(mddev->gendisk);
 
        return len;
 }
@@ -3846,11 +4025,9 @@ static int md_alloc(dev_t dev, char *name)
        flush_scheduled_work();
 
        mutex_lock(&disks_mutex);
-       if (mddev->gendisk) {
-               mutex_unlock(&disks_mutex);
-               mddev_put(mddev);
-               return -EEXIST;
-       }
+       error = -EEXIST;
+       if (mddev->gendisk)
+               goto abort;
 
        if (name) {
                /* Need to ensure that 'name' is not a duplicate.
@@ -3862,17 +4039,15 @@ static int md_alloc(dev_t dev, char *name)
                        if (mddev2->gendisk &&
                            strcmp(mddev2->gendisk->disk_name, name) == 0) {
                                spin_unlock(&all_mddevs_lock);
-                               return -EEXIST;
+                               goto abort;
                        }
                spin_unlock(&all_mddevs_lock);
        }
 
+       error = -ENOMEM;
        mddev->queue = blk_alloc_queue(GFP_KERNEL);
-       if (!mddev->queue) {
-               mutex_unlock(&disks_mutex);
-               mddev_put(mddev);
-               return -ENOMEM;
-       }
+       if (!mddev->queue)
+               goto abort;
        mddev->queue->queuedata = mddev;
 
        /* Can be unlocked because the queue is new: no concurrency */
@@ -3882,11 +4057,9 @@ static int md_alloc(dev_t dev, char *name)
 
        disk = alloc_disk(1 << shift);
        if (!disk) {
-               mutex_unlock(&disks_mutex);
                blk_cleanup_queue(mddev->queue);
                mddev->queue = NULL;
-               mddev_put(mddev);
-               return -ENOMEM;
+               goto abort;
        }
        disk->major = MAJOR(mddev->unit);
        disk->first_minor = unit << shift;
@@ -3908,16 +4081,22 @@ static int md_alloc(dev_t dev, char *name)
        mddev->gendisk = disk;
        error = kobject_init_and_add(&mddev->kobj, &md_ktype,
                                     &disk_to_dev(disk)->kobj, "%s", "md");
-       mutex_unlock(&disks_mutex);
-       if (error)
+       if (error) {
+               /* This isn't possible, but as kobject_init_and_add is marked
+                * __must_check, we must do something with the result
+                */
                printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
                       disk->disk_name);
-       else {
+               error = 0;
+       }
+ abort:
+       mutex_unlock(&disks_mutex);
+       if (!error) {
                kobject_uevent(&mddev->kobj, KOBJ_ADD);
                mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
        }
        mddev_put(mddev);
-       return 0;
+       return error;
 }
 
 static struct kobject *md_probe(dev_t dev, int *part, void *data)
@@ -4046,10 +4225,6 @@ static int do_md_run(mddev_t * mddev)
        }
        strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
 
-       if (pers->level >= 4 && pers->level <= 6)
-               /* Cannot support integrity (yet) */
-               blk_integrity_unregister(mddev->gendisk);
-
        if (mddev->reshape_position != MaxSector &&
            pers->start_reshape == NULL) {
                /* This personality cannot handle reshaping... */
@@ -4174,7 +4349,7 @@ static int do_md_run(mddev_t * mddev)
                        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                        mddev->sync_thread = md_register_thread(md_do_sync,
                                                                mddev,
-                                                               "%s_resync");
+                                                               "resync");
                        if (!mddev->sync_thread) {
                                printk(KERN_ERR "%s: could not start resync"
                                       " thread...\n",
@@ -4187,6 +4362,7 @@ static int do_md_run(mddev_t * mddev)
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 
+       revalidate_disk(mddev->gendisk);
        mddev->changed = 1;
        md_new_event(mddev);
        sysfs_notify_dirent(mddev->sysfs_state);
@@ -4258,12 +4434,11 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
        struct gendisk *disk = mddev->gendisk;
        mdk_rdev_t *rdev;
 
+       mutex_lock(&mddev->open_mutex);
        if (atomic_read(&mddev->openers) > is_open) {
                printk("md: %s still in use.\n",mdname(mddev));
-               return -EBUSY;
-       }
-
-       if (mddev->pers) {
+               err = -EBUSY;
+       } else if (mddev->pers) {
 
                if (mddev->sync_thread) {
                        set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -4320,8 +4495,12 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
                if (mode == 1)
                        set_disk_ro(disk, 1);
                clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+               err = 0;
        }
-
+out:
+       mutex_unlock(&mddev->open_mutex);
+       if (err)
+               return err;
        /*
         * Free resources if final stop
         */
@@ -4330,12 +4509,12 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
                printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
 
                bitmap_destroy(mddev);
-               if (mddev->bitmap_file) {
-                       restore_bitmap_write_access(mddev->bitmap_file);
-                       fput(mddev->bitmap_file);
-                       mddev->bitmap_file = NULL;
+               if (mddev->bitmap_info.file) {
+                       restore_bitmap_write_access(mddev->bitmap_info.file);
+                       fput(mddev->bitmap_info.file);
+                       mddev->bitmap_info.file = NULL;
                }
-               mddev->bitmap_offset = 0;
+               mddev->bitmap_info.offset = 0;
 
                /* make sure all md_delayed_delete calls have finished */
                flush_scheduled_work();
@@ -4376,6 +4555,11 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
                mddev->degraded = 0;
                mddev->barriers_work = 0;
                mddev->safemode = 0;
+               mddev->bitmap_info.offset = 0;
+               mddev->bitmap_info.default_offset = 0;
+               mddev->bitmap_info.chunksize = 0;
+               mddev->bitmap_info.daemon_sleep = 0;
+               mddev->bitmap_info.max_write_behind = 0;
                kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
                if (mddev->hold_active == UNTIL_STOP)
                        mddev->hold_active = 0;
@@ -4387,7 +4571,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
        blk_integrity_unregister(disk);
        md_new_event(mddev);
        sysfs_notify_dirent(mddev->sysfs_state);
-out:
        return err;
 }
 
@@ -4528,10 +4711,10 @@ static int get_version(void __user * arg)
 static int get_array_info(mddev_t * mddev, void __user * arg)
 {
        mdu_array_info_t info;
-       int nr,working,active,failed,spare;
+       int nr,working,insync,failed,spare;
        mdk_rdev_t *rdev;
 
-       nr=working=active=failed=spare=0;
+       nr=working=insync=failed=spare=0;
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                nr++;
                if (test_bit(Faulty, &rdev->flags))
@@ -4539,7 +4722,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
                else {
                        working++;
                        if (test_bit(In_sync, &rdev->flags))
-                               active++;       
+                               insync++;       
                        else
                                spare++;
                }
@@ -4562,9 +4745,9 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
        info.state         = 0;
        if (mddev->in_sync)
                info.state = (1<<MD_SB_CLEAN);
-       if (mddev->bitmap && mddev->bitmap_offset)
+       if (mddev->bitmap && mddev->bitmap_info.offset)
                info.state = (1<<MD_SB_BITMAP_PRESENT);
-       info.active_disks  = active;
+       info.active_disks  = insync;
        info.working_disks = working;
        info.failed_disks  = failed;
        info.spare_disks   = spare;
@@ -4674,7 +4857,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                if (!list_empty(&mddev->disks)) {
                        mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
                                                        mdk_rdev_t, same_set);
-                       int err = super_types[mddev->major_version]
+                       err = super_types[mddev->major_version]
                                .load_super(rdev, rdev0, mddev->minor_version);
                        if (err < 0) {
                                printk(KERN_WARNING 
@@ -4920,23 +5103,23 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
        if (fd >= 0) {
                if (mddev->bitmap)
                        return -EEXIST; /* cannot add when bitmap is present */
-               mddev->bitmap_file = fget(fd);
+               mddev->bitmap_info.file = fget(fd);
 
-               if (mddev->bitmap_file == NULL) {
+               if (mddev->bitmap_info.file == NULL) {
                        printk(KERN_ERR "%s: error: failed to get bitmap file\n",
                               mdname(mddev));
                        return -EBADF;
                }
 
-               err = deny_bitmap_write_access(mddev->bitmap_file);
+               err = deny_bitmap_write_access(mddev->bitmap_info.file);
                if (err) {
                        printk(KERN_ERR "%s: error: bitmap file is already in use\n",
                               mdname(mddev));
-                       fput(mddev->bitmap_file);
-                       mddev->bitmap_file = NULL;
+                       fput(mddev->bitmap_info.file);
+                       mddev->bitmap_info.file = NULL;
                        return err;
                }
-               mddev->bitmap_offset = 0; /* file overrides offset */
+               mddev->bitmap_info.offset = 0; /* file overrides offset */
        } else if (mddev->bitmap == NULL)
                return -ENOENT; /* cannot remove what isn't there */
        err = 0;
@@ -4951,11 +5134,11 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
                mddev->pers->quiesce(mddev, 0);
        }
        if (fd < 0) {
-               if (mddev->bitmap_file) {
-                       restore_bitmap_write_access(mddev->bitmap_file);
-                       fput(mddev->bitmap_file);
+               if (mddev->bitmap_info.file) {
+                       restore_bitmap_write_access(mddev->bitmap_info.file);
+                       fput(mddev->bitmap_info.file);
                }
-               mddev->bitmap_file = NULL;
+               mddev->bitmap_info.file = NULL;
        }
 
        return err;
@@ -5022,8 +5205,8 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
                mddev->flags         = 0;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
-       mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
-       mddev->bitmap_offset = 0;
+       mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
+       mddev->bitmap_info.offset = 0;
 
        mddev->reshape_position = MaxSector;
 
@@ -5085,18 +5268,8 @@ static int update_size(mddev_t *mddev, sector_t num_sectors)
                        return -ENOSPC;
        }
        rv = mddev->pers->resize(mddev, num_sectors);
-       if (!rv) {
-               struct block_device *bdev;
-
-               bdev = bdget_disk(mddev->gendisk, 0);
-               if (bdev) {
-                       mutex_lock(&bdev->bd_inode->i_mutex);
-                       i_size_write(bdev->bd_inode,
-                                    (loff_t)mddev->array_sectors << 9);
-                       mutex_unlock(&bdev->bd_inode->i_mutex);
-                       bdput(bdev);
-               }
-       }
+       if (!rv)
+               revalidate_disk(mddev->gendisk);
        return rv;
 }
 
@@ -5133,7 +5306,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
        int state = 0;
 
        /* calculate expected state,ignoring low bits */
-       if (mddev->bitmap && mddev->bitmap_offset)
+       if (mddev->bitmap && mddev->bitmap_info.offset)
                state |= (1 << MD_SB_BITMAP_PRESENT);
 
        if (mddev->major_version != info->major_version ||
@@ -5192,9 +5365,10 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
                        /* add the bitmap */
                        if (mddev->bitmap)
                                return -EEXIST;
-                       if (mddev->default_bitmap_offset == 0)
+                       if (mddev->bitmap_info.default_offset == 0)
                                return -EINVAL;
-                       mddev->bitmap_offset = mddev->default_bitmap_offset;
+                       mddev->bitmap_info.offset =
+                               mddev->bitmap_info.default_offset;
                        mddev->pers->quiesce(mddev, 1);
                        rv = bitmap_create(mddev);
                        if (rv)
@@ -5209,7 +5383,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
                        mddev->pers->quiesce(mddev, 1);
                        bitmap_destroy(mddev);
                        mddev->pers->quiesce(mddev, 0);
-                       mddev->bitmap_offset = 0;
+                       mddev->bitmap_info.offset = 0;
                }
        }
        md_update_sb(mddev, 1);
@@ -5482,12 +5656,12 @@ static int md_open(struct block_device *bdev, fmode_t mode)
        }
        BUG_ON(mddev != bdev->bd_disk->private_data);
 
-       if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
+       if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
                goto out;
 
        err = 0;
        atomic_inc(&mddev->openers);
-       mddev_unlock(mddev);
+       mutex_unlock(&mddev->open_mutex);
 
        check_disk_change(bdev);
  out:
@@ -5519,7 +5693,7 @@ static int md_revalidate(struct gendisk *disk)
        mddev->changed = 0;
        return 0;
 }
-static struct block_device_operations md_fops =
+static const struct block_device_operations md_fops =
 {
        .owner          = THIS_MODULE,
        .open           = md_open,
@@ -5594,7 +5768,10 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
        thread->run = run;
        thread->mddev = mddev;
        thread->timeout = MAX_SCHEDULE_TIMEOUT;
-       thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
+       thread->tsk = kthread_run(md_thread, thread,
+                                 "%s_%s",
+                                 mdname(thread->mddev),
+                                 name ?: mddev->pers->name);
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
@@ -5915,14 +6092,14 @@ static int md_seq_show(struct seq_file *seq, void *v)
                        unsigned long chunk_kb;
                        unsigned long flags;
                        spin_lock_irqsave(&bitmap->lock, flags);
-                       chunk_kb = bitmap->chunksize >> 10;
+                       chunk_kb = mddev->bitmap_info.chunksize >> 10;
                        seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
                                "%lu%s chunk",
                                bitmap->pages - bitmap->missing_pages,
                                bitmap->pages,
                                (bitmap->pages - bitmap->missing_pages)
                                        << (PAGE_SHIFT - 10),
-                               chunk_kb ? chunk_kb : bitmap->chunksize,
+                               chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
                                chunk_kb ? "KB" : "B");
                        if (bitmap->file) {
                                seq_printf(seq, ", file: ");
@@ -6313,6 +6490,7 @@ void md_do_sync(mddev_t *mddev)
                       desc, mdname(mddev));
                mddev->curr_resync = j;
        }
+       mddev->curr_resync_completed = mddev->curr_resync;
 
        while (j < max_sectors) {
                sector_t sectors;
@@ -6336,10 +6514,16 @@ void md_do_sync(mddev_t *mddev)
                        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
                }
 
-               if (j >= mddev->resync_max)
-                       wait_event(mddev->recovery_wait,
-                                  mddev->resync_max > j
-                                  || kthread_should_stop());
+               while (j >= mddev->resync_max && !kthread_should_stop()) {
+                       /* As this condition is controlled by user-space,
+                        * we can block indefinitely, so use '_interruptible'
+                        * to avoid triggering warnings.
+                        */
+                       flush_signals(current); /* just in case */
+                       wait_event_interruptible(mddev->recovery_wait,
+                                                mddev->resync_max > j
+                                                || kthread_should_stop());
+               }
 
                if (kthread_should_stop())
                        goto interrupted;
@@ -6450,10 +6634,16 @@ void md_do_sync(mddev_t *mddev)
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
  skip:
+       if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+               /* We completed so min/max setting can be forgotten if used. */
+               if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+                       mddev->resync_min = 0;
+               mddev->resync_max = MaxSector;
+       } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+               mddev->resync_min = mddev->curr_resync_completed;
        mddev->curr_resync = 0;
-       mddev->curr_resync_completed = 0;
-       mddev->resync_min = 0;
-       mddev->resync_max = MaxSector;
+       if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+               mddev->curr_resync_completed = 0;
        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
        wake_up(&resync_wait);
        set_bit(MD_RECOVERY_DONE, &mddev->recovery);
@@ -6551,7 +6741,7 @@ void md_check_recovery(mddev_t *mddev)
 
 
        if (mddev->bitmap)
-               bitmap_daemon_work(mddev->bitmap);
+               bitmap_daemon_work(mddev);
 
        if (mddev->ro)
                return;
@@ -6702,7 +6892,7 @@ void md_check_recovery(mddev_t *mddev)
                        }
                        mddev->sync_thread = md_register_thread(md_do_sync,
                                                                mddev,
-                                                               "%s_resync");
+                                                               "resync");
                        if (!mddev->sync_thread) {
                                printk(KERN_ERR "%s: could not start resync"
                                        " thread...\n",