static ctl_table raid_table[] = {
{
- .ctl_name = DEV_RAID_SPEED_LIMIT_MIN,
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = DEV_RAID_SPEED_LIMIT_MAX,
.procname = "speed_limit_max",
.data = &sysctl_speed_limit_max,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
- { .ctl_name = 0 }
+ { }
};
static ctl_table raid_dir_table[] = {
{
- .ctl_name = DEV_RAID,
.procname = "raid",
.maxlen = 0,
.mode = S_IRUGO|S_IXUGO,
.child = raid_table,
},
- { .ctl_name = 0 }
+ { }
};
static ctl_table raid_root_table[] = {
{
- .ctl_name = CTL_DEV,
.procname = "dev",
.maxlen = 0,
.mode = 0555,
.child = raid_dir_table,
},
- { .ctl_name = 0 }
+ { }
};
-static struct block_device_operations md_fops;
+static const struct block_device_operations md_fops;
static int start_readonly;
mddev->pers->quiesce(mddev, 0);
}
+int mddev_congested(mddev_t *mddev, int bits)
+{
+ return mddev->suspended;
+}
+EXPORT_SYMBOL(mddev_congested);
+
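mddev_congested() exposes the suspended state as a congestion hint so that each personality need not open-code the check. A sketch of how a personality's congested callback would consult it first, modelled on raid1 (function body abridged):

	static int raid1_congested(void *data, int bits)
	{
		mddev_t *mddev = data;

		/* a suspended array always reports congested, so
		 * callers back off instead of queueing new I/O */
		if (mddev_congested(mddev, bits))
			return 1;
		/* ... otherwise poll the member devices' queues ... */
		return 0;
	}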
static inline mddev_t *mddev_get(mddev_t *mddev)
{
desc->raid_disk < mddev->raid_disks */) {
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
+ } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
+ /* active but not in sync implies recovery up to
+ * reshape position. We don't know exactly where
+ * that is, so set to zero for now */
+ if (mddev->minor_version >= 91) {
+ rdev->recovery_offset = 0;
+ rdev->raid_disk = desc->raid_disk;
+ }
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
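Together with the writer-side change below, a v0.90 descriptor now encodes three device states in two bits (bit names from md_p.h):

	MD_DISK_ACTIVE + MD_DISK_SYNC   in sync at desc->raid_disk
	MD_DISK_ACTIVE alone            recovering during a reshape
	                                (minor version >= 91; v0.90 cannot
	                                record the offset, hence the zero)
	neither bit                     spare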
list_for_each_entry(rdev2, &mddev->disks, same_set) {
mdp_disk_t *d;
int desc_nr;
- if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
- && !test_bit(Faulty, &rdev2->flags))
+ int is_active = test_bit(In_sync, &rdev2->flags);
+
+ if (rdev2->raid_disk >= 0 &&
+ sb->minor_version >= 91)
+ /* we have nowhere to store the recovery_offset,
+ * but if it is not below the reshape_position,
+ * we can piggy-back on that.
+ */
+ is_active = 1;
+ if (rdev2->raid_disk < 0 ||
+ test_bit(Faulty, &rdev2->flags))
+ is_active = 0;
+ if (is_active)
desc_nr = rdev2->raid_disk;
else
desc_nr = next_spare++;
d->number = rdev2->desc_nr;
d->major = MAJOR(rdev2->bdev->bd_dev);
d->minor = MINOR(rdev2->bdev->bd_dev);
- if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
- && !test_bit(Faulty, &rdev2->flags))
+ if (is_active)
d->raid_disk = rdev2->raid_disk;
else
d->raid_disk = rdev2->desc_nr; /* compatibility */
if (test_bit(Faulty, &rdev2->flags))
d->state = (1<<MD_DISK_FAULTY);
- else if (test_bit(In_sync, &rdev2->flags)) {
+ else if (is_active) {
d->state = (1<<MD_DISK_ACTIVE);
- d->state |= (1<<MD_DISK_SYNC);
+ if (test_bit(In_sync, &rdev2->flags))
+ d->state |= (1<<MD_DISK_SYNC);
active++;
working++;
} else {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags)) {
- if (mddev->curr_resync_completed > rdev->recovery_offset)
- rdev->recovery_offset = mddev->curr_resync_completed;
if (rdev->recovery_offset > 0) {
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
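The v1.x format records the exact offset. For reference, the pre-existing reader side (super_1_validate) consumes the feature bit roughly as follows; this snippet is context, not part of the patch:

	if (le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)
		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
	else
		set_bit(In_sync, &rdev->flags);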
*/
mdk_rdev_t *rdev;
+ /* First make sure individual recovery_offsets are correct */
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(In_sync, &rdev->flags) &&
+ mddev->curr_resync_completed > rdev->recovery_offset)
+ rdev->recovery_offset = mddev->curr_resync_completed;
+ }
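The reason for a separate first pass: sync_super() for any one device serializes the roles and offsets of every device in the array, so all recovery_offsets must be up to date before the first superblock is generated, rather than being refreshed per-device as the two removed lines above did.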
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->sb_events == mddev->events ||
(nospares &&
/* otherwise we have to go forward and ... */
mddev->events ++;
if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
- /* .. if the array isn't clean, insist on an odd 'events' */
- if ((mddev->events&1)==0) {
- mddev->events++;
+ /* .. if the array isn't clean, an 'even' event count means
+ * the update must reach the spares as well. */
+ if ((mddev->events&1)==0)
nospares = 0;
- }
} else {
- /* otherwise insist on an even 'events' (for clean states) */
- if ((mddev->events&1)) {
- mddev->events++;
+ /* otherwise an 'odd' event count must also reach the spares */
+ if ((mddev->events&1))
nospares = 0;
- }
}
}
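The parity convention itself is unchanged: an even 'events' count marks a clean state, an odd one a dirty state, which is what lets a clean<->dirty transition sometimes skip rewriting the spares. What changes is the remedy when the freshly incremented count has the wrong parity for the new state: rather than burning a second increment to force parity, the count is kept and the spare-skipping optimisation is disabled. Expressed as a hypothetical predicate (not in the patch):

	static int events_parity_forces_spares(u64 events, int clean)
	{
		/* even <-> clean, odd <-> dirty; on a mismatch the
		 * spares must see this superblock update too */
		return clean ? (events & 1) : !(events & 1);
	}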
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
set_bit(In_sync, &rdev->flags);
- } else if (rdev->raid_disk >= mddev->raid_disks) {
+ } else if (rdev->raid_disk >=
+            (mddev->raid_disks - min(0, mddev->delta_disks))) {
rdev->raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
}
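A worked example for the new bound, assuming the usual md convention that delta_disks is the new device count minus the old: while shrinking a 5-device array to 3, raid_disks already holds 3 and delta_disks is -2, so the bound is 3 - min(0, -2) = 5 and slots 0..4 stay valid until the reshape completes. For a grow, delta_disks >= 0 and the bound remains raid_disks, preserving the old behaviour.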
if (max < mddev->resync_min)
return -EINVAL;
if (max < mddev->resync_max &&
+ mddev->ro == 0 &&
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
- "%s_resync");
+ "resync");
if (!mddev->sync_thread) {
printk(KERN_ERR "%s: could not start resync"
" thread...\n",
if (mode == 1)
set_disk_ro(disk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
static int get_array_info(mddev_t * mddev, void __user * arg)
{
mdu_array_info_t info;
- int nr,working,active,failed,spare;
+ int nr,working,insync,failed,spare;
mdk_rdev_t *rdev;
- nr=working=active=failed=spare=0;
+ nr=working=insync=failed=spare=0;
list_for_each_entry(rdev, &mddev->disks, same_set) {
nr++;
if (test_bit(Faulty, &rdev->flags))
else {
working++;
if (test_bit(In_sync, &rdev->flags))
- active++;
+ insync++;
else
spare++;
}
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_offset)
info.state |= (1<<MD_SB_BITMAP_PRESENT);
- info.active_disks = active;
+ info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
info.spare_disks = spare;
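The rename makes the reported semantics explicit: a rebuilding device now counts toward working_disks and spare_disks but not active_disks. A userspace sketch of where these numbers surface, using the long-standing GET_ARRAY_INFO ioctl from <linux/raid/md_u.h>:

	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/major.h>
	#include <linux/raid/md_u.h>

	int main(void)
	{
		mdu_array_info_t info;
		int fd = open("/dev/md0", O_RDONLY);

		if (fd >= 0 && ioctl(fd, GET_ARRAY_INFO, &info) == 0)
			printf("active=%d working=%d failed=%d spare=%d\n",
			       info.active_disks, info.working_disks,
			       info.failed_disks, info.spare_disks);
		return 0;
	}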
if (!list_empty(&mddev->disks)) {
mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
mdk_rdev_t, same_set);
- int err = super_types[mddev->major_version]
+ err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
printk(KERN_WARNING
mddev->changed = 0;
return 0;
}
-static struct block_device_operations md_fops =
+static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
.open = md_open,
thread->run = run;
thread->mddev = mddev;
thread->timeout = MAX_SCHEDULE_TIMEOUT;
- thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
+ thread->tsk = kthread_run(md_thread, thread,
+ "%s_%s",
+ mdname(thread->mddev),
+ name ?: mddev->pers->name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
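With the format string fixed inside md_register_thread(), callers pass a bare suffix and NULL falls back to the personality name, so the call sites above and below become, for /dev/md0:

	/* the resync thread is named "md0_resync" */
	mddev->sync_thread = md_register_thread(md_do_sync, mddev, "resync");

	/* a personality passing NULL gets e.g. "md0_raid1" */
	mddev->thread = md_register_thread(raid1d, mddev, NULL);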
skip:
mddev->curr_resync = 0;
mddev->curr_resync_completed = 0;
- mddev->resync_min = 0;
- mddev->resync_max = MaxSector;
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ /* We completed, so the max setting can be forgotten. */
+ mddev->resync_max = MaxSector;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
}
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
- "%s_resync");
+ "resync");
if (!mddev->sync_thread) {
printk(KERN_ERR "%s: could not start resync"
" thread...\n",