md: move offset, daemon_sleep and chunksize out of bitmap structure
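This change gathers the per-array bitmap metadata that used to live partly in mddev and partly in the bitmap structure into a single mddev->bitmap_info group. As a rough illustration only, the fields touched by the diff below imply a grouping along these lines; the types and ordering here are guesses, and the authoritative definition is the one added to md.h by this patch:

/*
 * Illustrative sketch, reconstructed from the field accesses in the
 * diff below; not the exact md.h definition.
 */
struct {
	struct file	*file;			/* was mddev->bitmap_file */
	loff_t		offset;			/* was mddev->bitmap_offset */
	loff_t		default_offset;		/* was mddev->default_bitmap_offset */
	struct mutex	mutex;
	unsigned long	chunksize;		/* was bitmap->chunksize */
	unsigned long	daemon_sleep;		/* was bitmap->daemon_sleep */
	unsigned long	max_write_behind;
} bitmap_info;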
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f64b330..c56c64d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -98,44 +98,40 @@ static struct ctl_table_header *raid_table_header;
 
 static ctl_table raid_table[] = {
        {
-               .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
-               .proc_handler   = &proc_dointvec,
+               .proc_handler   = proc_dointvec,
        },
        {
-               .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
-               .proc_handler   = &proc_dointvec,
+               .proc_handler   = proc_dointvec,
        },
-       { .ctl_name = 0 }
+       { }
 };
 
 static ctl_table raid_dir_table[] = {
        {
-               .ctl_name       = DEV_RAID,
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = S_IRUGO|S_IXUGO,
                .child          = raid_table,
        },
-       { .ctl_name = 0 }
+       { }
 };
 
 static ctl_table raid_root_table[] = {
        {
-               .ctl_name       = CTL_DEV,
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
-       { .ctl_name = 0 }
+       {  }
 };
 
 static const struct block_device_operations md_fops;
@@ -217,12 +213,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
                return 0;
        }
        rcu_read_lock();
-       if (mddev->suspended) {
+       if (mddev->suspended || mddev->barrier) {
                DEFINE_WAIT(__wait);
                for (;;) {
                        prepare_to_wait(&mddev->sb_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
-                       if (!mddev->suspended)
+                       if (!mddev->suspended && !mddev->barrier)
                                break;
                        rcu_read_unlock();
                        schedule();
@@ -262,6 +258,112 @@ static void mddev_resume(mddev_t *mddev)
        mddev->pers->quiesce(mddev, 0);
 }
 
+int mddev_congested(mddev_t *mddev, int bits)
+{
+       if (mddev->barrier)
+               return 1;
+       return mddev->suspended;
+}
+EXPORT_SYMBOL(mddev_congested);
+
+/*
+ * Generic barrier handling for md
+ */
+
+#define POST_REQUEST_BARRIER ((void*)1)
+
+static void md_end_barrier(struct bio *bio, int err)
+{
+       mdk_rdev_t *rdev = bio->bi_private;
+       mddev_t *mddev = rdev->mddev;
+       if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
+               set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);
+
+       rdev_dec_pending(rdev, mddev);
+
+       if (atomic_dec_and_test(&mddev->flush_pending)) {
+               if (mddev->barrier == POST_REQUEST_BARRIER) {
+                       /* This was a post-request barrier */
+                       mddev->barrier = NULL;
+                       wake_up(&mddev->sb_wait);
+               } else
+                       /* The pre-request barrier has finished */
+                       schedule_work(&mddev->barrier_work);
+       }
+       bio_put(bio);
+}
+
+static void submit_barriers(mddev_t *mddev)
+{
+       mdk_rdev_t *rdev;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+               if (rdev->raid_disk >= 0 &&
+                   !test_bit(Faulty, &rdev->flags)) {
+                       /* Take two references, one is dropped
+                        * when request finishes, one after
+                        * we reclaim rcu_read_lock
+                        */
+                       struct bio *bi;
+                       atomic_inc(&rdev->nr_pending);
+                       atomic_inc(&rdev->nr_pending);
+                       rcu_read_unlock();
+                       bi = bio_alloc(GFP_KERNEL, 0);
+                       bi->bi_end_io = md_end_barrier;
+                       bi->bi_private = rdev;
+                       bi->bi_bdev = rdev->bdev;
+                       atomic_inc(&mddev->flush_pending);
+                       submit_bio(WRITE_BARRIER, bi);
+                       rcu_read_lock();
+                       rdev_dec_pending(rdev, mddev);
+               }
+       rcu_read_unlock();
+}
+
+static void md_submit_barrier(struct work_struct *ws)
+{
+       mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
+       struct bio *bio = mddev->barrier;
+
+       atomic_set(&mddev->flush_pending, 1);
+
+       if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
+               bio_endio(bio, -EOPNOTSUPP);
+       else if (bio->bi_size == 0)
+               /* an empty barrier - all done */
+               bio_endio(bio, 0);
+       else {
+               bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+               if (mddev->pers->make_request(mddev->queue, bio))
+                       generic_make_request(bio);
+               mddev->barrier = POST_REQUEST_BARRIER;
+               submit_barriers(mddev);
+       }
+       if (atomic_dec_and_test(&mddev->flush_pending)) {
+               mddev->barrier = NULL;
+               wake_up(&mddev->sb_wait);
+       }
+}
+
+void md_barrier_request(mddev_t *mddev, struct bio *bio)
+{
+       spin_lock_irq(&mddev->write_lock);
+       wait_event_lock_irq(mddev->sb_wait,
+                           !mddev->barrier,
+                           mddev->write_lock, /*nothing*/);
+       mddev->barrier = bio;
+       spin_unlock_irq(&mddev->write_lock);
+
+       atomic_set(&mddev->flush_pending, 1);
+       INIT_WORK(&mddev->barrier_work, md_submit_barrier);
+
+       submit_barriers(mddev);
+
+       if (atomic_dec_and_test(&mddev->flush_pending))
+               schedule_work(&mddev->barrier_work);
+}
+EXPORT_SYMBOL(md_barrier_request);
 
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -361,6 +463,7 @@ static mddev_t * mddev_find(dev_t unit)
 
        mutex_init(&new->open_mutex);
        mutex_init(&new->reconfig_mutex);
+       mutex_init(&new->bitmap_info.mutex);
        INIT_LIST_HEAD(&new->disks);
        INIT_LIST_HEAD(&new->all_mddevs);
        init_timer(&new->safemode_timer);
@@ -368,6 +471,7 @@ static mddev_t * mddev_find(dev_t unit)
        atomic_set(&new->openers, 0);
        atomic_set(&new->active_io, 0);
        spin_lock_init(&new->write_lock);
+       atomic_set(&new->flush_pending, 0);
        init_waitqueue_head(&new->sb_wait);
        init_waitqueue_head(&new->recovery_wait);
        new->reshape_position = MaxSector;
@@ -746,7 +850,7 @@ struct super_type  {
  */
 int md_check_no_bitmap(mddev_t *mddev)
 {
-       if (!mddev->bitmap_file && !mddev->bitmap_offset)
+       if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
                return 0;
        printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
                mdname(mddev), mddev->pers->name);
@@ -874,8 +978,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->raid_disks = sb->raid_disks;
                mddev->dev_sectors = sb->size * 2;
                mddev->events = ev1;
-               mddev->bitmap_offset = 0;
-               mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
+               mddev->bitmap_info.offset = 0;
+               mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
 
                if (mddev->minor_version >= 91) {
                        mddev->reshape_position = sb->reshape_position;
@@ -909,8 +1013,9 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->max_disks = MD_SB_DISKS;
 
                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
-                   mddev->bitmap_file == NULL)
-                       mddev->bitmap_offset = mddev->default_bitmap_offset;
+                   mddev->bitmap_info.file == NULL)
+                       mddev->bitmap_info.offset =
+                               mddev->bitmap_info.default_offset;
 
        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
@@ -938,6 +1043,14 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                            desc->raid_disk < mddev->raid_disks */) {
                        set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = desc->raid_disk;
+               } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
+                       /* active but not in sync implies recovery up to
+                        * reshape position.  We don't know exactly where
+                        * that is, so set to zero for now */
+                       if (mddev->minor_version >= 91) {
+                               rdev->recovery_offset = 0;
+                               rdev->raid_disk = desc->raid_disk;
+                       }
                }
                if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
@@ -1019,15 +1132,26 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->layout = mddev->layout;
        sb->chunk_size = mddev->chunk_sectors << 9;
 
-       if (mddev->bitmap && mddev->bitmap_file == NULL)
+       if (mddev->bitmap && mddev->bitmap_info.file == NULL)
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);
 
        sb->disks[0].state = (1<<MD_DISK_REMOVED);
        list_for_each_entry(rdev2, &mddev->disks, same_set) {
                mdp_disk_t *d;
                int desc_nr;
-               if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
-                   && !test_bit(Faulty, &rdev2->flags))
+               int is_active = test_bit(In_sync, &rdev2->flags);
+
+               if (rdev2->raid_disk >= 0 &&
+                   sb->minor_version >= 91)
+                       /* we have nowhere to store the recovery_offset,
+                        * but if it is not below the reshape_position,
+                        * we can piggy-back on that.
+                        */
+                       is_active = 1;
+               if (rdev2->raid_disk < 0 ||
+                   test_bit(Faulty, &rdev2->flags))
+                       is_active = 0;
+               if (is_active)
                        desc_nr = rdev2->raid_disk;
                else
                        desc_nr = next_spare++;
@@ -1037,16 +1161,16 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
-               if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
-                   && !test_bit(Faulty, &rdev2->flags))
+               if (is_active)
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
                if (test_bit(Faulty, &rdev2->flags))
                        d->state = (1<<MD_DISK_FAULTY);
-               else if (test_bit(In_sync, &rdev2->flags)) {
+               else if (is_active) {
                        d->state = (1<<MD_DISK_ACTIVE);
-                       d->state |= (1<<MD_DISK_SYNC);
+                       if (test_bit(In_sync, &rdev2->flags))
+                               d->state |= (1<<MD_DISK_SYNC);
                        active++;
                        working++;
                } else {
@@ -1086,7 +1210,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 {
        if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
                return 0; /* component must fit device */
-       if (rdev->mddev->bitmap_offset)
+       if (rdev->mddev->bitmap_info.offset)
                return 0; /* can't move bitmap */
        rdev->sb_start = calc_dev_sboffset(rdev->bdev);
        if (!num_sectors || num_sectors > rdev->sb_start)
@@ -1265,8 +1389,8 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->raid_disks = le32_to_cpu(sb->raid_disks);
                mddev->dev_sectors = le64_to_cpu(sb->size);
                mddev->events = ev1;
-               mddev->bitmap_offset = 0;
-               mddev->default_bitmap_offset = 1024 >> 9;
+               mddev->bitmap_info.offset = 0;
+               mddev->bitmap_info.default_offset = 1024 >> 9;
                
                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);
@@ -1274,8 +1398,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->max_disks =  (4096-256)/2;
 
                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
-                   mddev->bitmap_file == NULL )
-                       mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
+                   mddev->bitmap_info.file == NULL )
+                       mddev->bitmap_info.offset =
+                               (__s32)le32_to_cpu(sb->bitmap_offset);
 
                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
                        mddev->reshape_position = le64_to_cpu(sb->reshape_position);
@@ -1369,15 +1494,13 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->level = cpu_to_le32(mddev->level);
        sb->layout = cpu_to_le32(mddev->layout);
 
-       if (mddev->bitmap && mddev->bitmap_file == NULL) {
-               sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
+       if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
+               sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
                sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
        }
 
        if (rdev->raid_disk >= 0 &&
            !test_bit(In_sync, &rdev->flags)) {
-               if (mddev->curr_resync_completed > rdev->recovery_offset)
-                       rdev->recovery_offset = mddev->curr_resync_completed;
                if (rdev->recovery_offset > 0) {
                        sb->feature_map |=
                                cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
@@ -1439,7 +1562,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
                max_sectors -= rdev->data_offset;
                if (!num_sectors || num_sectors > max_sectors)
                        num_sectors = max_sectors;
-       } else if (rdev->mddev->bitmap_offset) {
+       } else if (rdev->mddev->bitmap_info.offset) {
                /* minor version 0 with bitmap we can't move */
                return 0;
        } else {
@@ -1911,6 +2034,14 @@ static void sync_sbs(mddev_t * mddev, int nospares)
         */
        mdk_rdev_t *rdev;
 
+       /* First make sure individual recovery_offsets are correct */
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
+               if (rdev->raid_disk >= 0 &&
+                   !test_bit(In_sync, &rdev->flags) &&
+                   mddev->curr_resync_completed > rdev->recovery_offset)
+                               rdev->recovery_offset = mddev->curr_resync_completed;
+
+       }       
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                if (rdev->sb_events == mddev->events ||
                    (nospares &&
@@ -2625,7 +2756,7 @@ static void analyze_sbs(mddev_t * mddev)
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
                        set_bit(In_sync, &rdev->flags);
-               } else if (rdev->raid_disk >= mddev->raid_disks) {
+               } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
                        rdev->raid_disk = -1;
                        clear_bit(In_sync, &rdev->flags);
                }
@@ -4218,7 +4349,7 @@ static int do_md_run(mddev_t * mddev)
                        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                        mddev->sync_thread = md_register_thread(md_do_sync,
                                                                mddev,
-                                                               "%s_resync");
+                                                               "resync");
                        if (!mddev->sync_thread) {
                                printk(KERN_ERR "%s: could not start resync"
                                       " thread...\n",
@@ -4378,12 +4509,12 @@ out:
                printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
 
                bitmap_destroy(mddev);
-               if (mddev->bitmap_file) {
-                       restore_bitmap_write_access(mddev->bitmap_file);
-                       fput(mddev->bitmap_file);
-                       mddev->bitmap_file = NULL;
+               if (mddev->bitmap_info.file) {
+                       restore_bitmap_write_access(mddev->bitmap_info.file);
+                       fput(mddev->bitmap_info.file);
+                       mddev->bitmap_info.file = NULL;
                }
-               mddev->bitmap_offset = 0;
+               mddev->bitmap_info.offset = 0;
 
                /* make sure all md_delayed_delete calls have finished */
                flush_scheduled_work();
@@ -4424,6 +4555,11 @@ out:
                mddev->degraded = 0;
                mddev->barriers_work = 0;
                mddev->safemode = 0;
+               mddev->bitmap_info.offset = 0;
+               mddev->bitmap_info.default_offset = 0;
+               mddev->bitmap_info.chunksize = 0;
+               mddev->bitmap_info.daemon_sleep = 0;
+               mddev->bitmap_info.max_write_behind = 0;
                kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
                if (mddev->hold_active == UNTIL_STOP)
                        mddev->hold_active = 0;
@@ -4609,7 +4745,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
        info.state         = 0;
        if (mddev->in_sync)
                info.state = (1<<MD_SB_CLEAN);
-       if (mddev->bitmap && mddev->bitmap_offset)
+       if (mddev->bitmap && mddev->bitmap_info.offset)
                info.state = (1<<MD_SB_BITMAP_PRESENT);
        info.active_disks  = insync;
        info.working_disks = working;
@@ -4967,23 +5103,23 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
        if (fd >= 0) {
                if (mddev->bitmap)
                        return -EEXIST; /* cannot add when bitmap is present */
-               mddev->bitmap_file = fget(fd);
+               mddev->bitmap_info.file = fget(fd);
 
-               if (mddev->bitmap_file == NULL) {
+               if (mddev->bitmap_info.file == NULL) {
                        printk(KERN_ERR "%s: error: failed to get bitmap file\n",
                               mdname(mddev));
                        return -EBADF;
                }
 
-               err = deny_bitmap_write_access(mddev->bitmap_file);
+               err = deny_bitmap_write_access(mddev->bitmap_info.file);
                if (err) {
                        printk(KERN_ERR "%s: error: bitmap file is already in use\n",
                               mdname(mddev));
-                       fput(mddev->bitmap_file);
-                       mddev->bitmap_file = NULL;
+                       fput(mddev->bitmap_info.file);
+                       mddev->bitmap_info.file = NULL;
                        return err;
                }
-               mddev->bitmap_offset = 0; /* file overrides offset */
+               mddev->bitmap_info.offset = 0; /* file overrides offset */
        } else if (mddev->bitmap == NULL)
                return -ENOENT; /* cannot remove what isn't there */
        err = 0;
@@ -4998,11 +5134,11 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
                mddev->pers->quiesce(mddev, 0);
        }
        if (fd < 0) {
-               if (mddev->bitmap_file) {
-                       restore_bitmap_write_access(mddev->bitmap_file);
-                       fput(mddev->bitmap_file);
+               if (mddev->bitmap_info.file) {
+                       restore_bitmap_write_access(mddev->bitmap_info.file);
+                       fput(mddev->bitmap_info.file);
                }
-               mddev->bitmap_file = NULL;
+               mddev->bitmap_info.file = NULL;
        }
 
        return err;
@@ -5069,8 +5205,8 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
                mddev->flags         = 0;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
-       mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
-       mddev->bitmap_offset = 0;
+       mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
+       mddev->bitmap_info.offset = 0;
 
        mddev->reshape_position = MaxSector;
 
@@ -5170,7 +5306,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
        int state = 0;
 
        /* calculate expected state,ignoring low bits */
-       if (mddev->bitmap && mddev->bitmap_offset)
+       if (mddev->bitmap && mddev->bitmap_info.offset)
                state |= (1 << MD_SB_BITMAP_PRESENT);
 
        if (mddev->major_version != info->major_version ||
@@ -5229,9 +5365,10 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
                        /* add the bitmap */
                        if (mddev->bitmap)
                                return -EEXIST;
-                       if (mddev->default_bitmap_offset == 0)
+                       if (mddev->bitmap_info.default_offset == 0)
                                return -EINVAL;
-                       mddev->bitmap_offset = mddev->default_bitmap_offset;
+                       mddev->bitmap_info.offset =
+                               mddev->bitmap_info.default_offset;
                        mddev->pers->quiesce(mddev, 1);
                        rv = bitmap_create(mddev);
                        if (rv)
@@ -5246,7 +5383,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
                        mddev->pers->quiesce(mddev, 1);
                        bitmap_destroy(mddev);
                        mddev->pers->quiesce(mddev, 0);
-                       mddev->bitmap_offset = 0;
+                       mddev->bitmap_info.offset = 0;
                }
        }
        md_update_sb(mddev, 1);
@@ -5631,7 +5768,10 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
        thread->run = run;
        thread->mddev = mddev;
        thread->timeout = MAX_SCHEDULE_TIMEOUT;
-       thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
+       thread->tsk = kthread_run(md_thread, thread,
+                                 "%s_%s",
+                                 mdname(thread->mddev),
+                                 name ?: mddev->pers->name);
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
@@ -5952,14 +6092,14 @@ static int md_seq_show(struct seq_file *seq, void *v)
                        unsigned long chunk_kb;
                        unsigned long flags;
                        spin_lock_irqsave(&bitmap->lock, flags);
-                       chunk_kb = bitmap->chunksize >> 10;
+                       chunk_kb = mddev->bitmap_info.chunksize >> 10;
                        seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
                                "%lu%s chunk",
                                bitmap->pages - bitmap->missing_pages,
                                bitmap->pages,
                                (bitmap->pages - bitmap->missing_pages)
                                        << (PAGE_SHIFT - 10),
-                               chunk_kb ? chunk_kb : bitmap->chunksize,
+                               chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
                                chunk_kb ? "KB" : "B");
                        if (bitmap->file) {
                                seq_printf(seq, ", file: ");
@@ -6350,6 +6490,7 @@ void md_do_sync(mddev_t *mddev)
                       desc, mdname(mddev));
                mddev->curr_resync = j;
        }
+       mddev->curr_resync_completed = mddev->curr_resync;
 
        while (j < max_sectors) {
                sector_t sectors;
@@ -6493,10 +6634,16 @@ void md_do_sync(mddev_t *mddev)
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
  skip:
+       if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+               /* We completed so min/max setting can be forgotten if used. */
+               if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+                       mddev->resync_min = 0;
+               mddev->resync_max = MaxSector;
+       } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+               mddev->resync_min = mddev->curr_resync_completed;
        mddev->curr_resync = 0;
-       mddev->curr_resync_completed = 0;
-       mddev->resync_min = 0;
-       mddev->resync_max = MaxSector;
+       if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+               mddev->curr_resync_completed = 0;
        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
        wake_up(&resync_wait);
        set_bit(MD_RECOVERY_DONE, &mddev->recovery);
@@ -6594,7 +6741,7 @@ void md_check_recovery(mddev_t *mddev)
 
 
        if (mddev->bitmap)
-               bitmap_daemon_work(mddev->bitmap);
+               bitmap_daemon_work(mddev);
 
        if (mddev->ro)
                return;
@@ -6745,7 +6892,7 @@ void md_check_recovery(mddev_t *mddev)
                        }
                        mddev->sync_thread = md_register_thread(md_do_sync,
                                                                mddev,
-                                                               "%s_resync");
+                                                               "resync");
                        if (!mddev->sync_thread) {
                                printk(KERN_ERR "%s: could not start resync"
                                        " thread...\n",