Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/kthread.h>
-#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
-#include <linux/mutex.h>
#include <linux/ctype.h>
-#include <linux/freezer.h>
-
-#include <linux/init.h>
-
+#include <linux/hdreg.h>
+#include <linux/proc_fs.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
#include <linux/file.h>
-
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-
-#include <asm/unaligned.h>
+#include <linux/delay.h>
#define MAJOR_NR MD_MAJOR
-#define MD_DRIVER
/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6
#ifndef MODULE
-static void autostart_arrays (int part);
+static void autostart_arrays(int part);
#endif
static LIST_HEAD(pers_list);
-static int md_fail_request (struct request_queue *q, struct bio *bio)
+static int md_fail_request(struct request_queue *q, struct bio *bio)
{
bio_io_error(bio);
return 0;
list_del(&mddev->all_mddevs);
spin_unlock(&all_mddevs_lock);
blk_cleanup_queue(mddev->queue);
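+		/* drop the cached "array_state" sysfs dirent, if any */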
+ if (mddev->sysfs_state)
+ sysfs_put(mddev->sysfs_state);
+ mddev->sysfs_state = NULL;
kobject_put(&mddev->kobj);
} else
spin_unlock(&all_mddevs_lock);
INIT_LIST_HEAD(&new->all_mddevs);
init_timer(&new->safemode_timer);
atomic_set(&new->active, 1);
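+	/* 'openers' counts block-device opens only; 'active' also
+	 * counts internal references, so busy-checks use 'openers'
+	 */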
+ atomic_set(&new->openers, 0);
spin_lock_init(&new->write_lock);
init_waitqueue_head(&new->sb_wait);
init_waitqueue_head(&new->recovery_wait);
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
- struct list_head *tmp, *tmp2;
mdk_rdev_t *rdev, *rdev2;
- rdev_for_each(rdev, tmp, mddev1)
- rdev_for_each(rdev2, tmp2, mddev2)
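+	/* the disk lists are updated with list_add_rcu()/list_del_rcu(),
+	 * so a read-side critical section is all we need here
+	 */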
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev1)
+ rdev_for_each_rcu(rdev2, mddev2)
if (rdev->bdev->bd_contains ==
- rdev2->bdev->bd_contains)
+ rdev2->bdev->bd_contains) {
+ rcu_read_unlock();
return 1;
-
+ }
+ rcu_read_unlock();
return 0;
}
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
- if (rdev->bdev->bd_part)
- ko = &rdev->bdev->bd_part->dev.kobj;
- else
- ko = &rdev->bdev->bd_disk->dev.kobj;
+ ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
kobject_del(&rdev->kobj);
goto fail;
}
- list_add(&rdev->same_set, &mddev->disks);
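+	/* cache the "state" dirent so later notifications need no
+	 * sysfs lookup and are safe from atomic context
+	 */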
+ rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");
+
+ list_add_rcu(&rdev->same_set, &mddev->disks);
bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
return 0;
return;
}
bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
- list_del_init(&rdev->same_set);
+ list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
-
+ sysfs_put(rdev->sysfs_state);
+ rdev->sysfs_state = NULL;
/* We need to delay this, otherwise we can deadlock when
- * writing to 'remove' to "dev/state"
+	 * writing 'remove' to "dev/state". We also need
+ * to delay it due to rcu usage.
*/
+ synchronize_rcu();
INIT_WORK(&rdev->del_work, md_delayed_delete);
kobject_get(&rdev->kobj);
schedule_work(&rdev->del_work);
if (err) {
printk(KERN_ERR "md: could not bd_claim %s.\n",
bdevname(bdev, b));
- blkdev_put(bdev);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
return err;
}
if (!shared)
if (!bdev)
MD_BUG();
bd_release(bdev);
- blkdev_put(bdev);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}
void md_autodetect_dev(dev_t dev);
if (rdev->mddev)
MD_BUG();
free_disk_sb(rdev);
- list_del_init(&rdev->same_set);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
err = 0;
}
- if (!err)
- sysfs_notify(&rdev->kobj, NULL, "state");
+ if (!err && rdev->sysfs_state)
+ sysfs_notify_dirent(rdev->sysfs_state);
return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
rdev->raid_disk = -1;
return err;
} else
- sysfs_notify(&rdev->kobj, NULL, "state");
+ sysfs_notify_dirent(rdev->sysfs_state);
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
printk(KERN_WARNING
clear_bit(Faulty, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
set_bit(In_sync, &rdev->flags);
- sysfs_notify(&rdev->kobj, NULL, "state");
+ sysfs_notify_dirent(rdev->sysfs_state);
}
return len;
}
if (strict_strtoull(buf, 10, &size) < 0)
return -EINVAL;
- if (size < my_mddev->size)
- return -EINVAL;
if (my_mddev->pers && rdev->raid_disk >= 0) {
if (my_mddev->persistent) {
size = super_types[my_mddev->major_version].
size = (rdev->bdev->bd_inode->i_size >> 10);
size -= rdev->data_offset/2;
}
- if (size < my_mddev->size)
- return -EINVAL; /* component must fit device */
}
+ if (size < my_mddev->size)
+ return -EINVAL; /* component must fit device */
rdev->size = size;
if (size > oldsize && my_mddev->external) {
}
+static void md_safemode_timeout(unsigned long data);
+
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
int i;
unsigned long msec;
char buf[30];
- char *e;
+
/* remove a period, and count digits after it */
if (len >= sizeof(buf))
return -EINVAL;
- strlcpy(buf, cbuf, len);
- buf[len] = 0;
+ strlcpy(buf, cbuf, sizeof(buf));
for (i=0; i<len; i++) {
if (dot) {
if (isdigit(buf[i])) {
buf[i] = 0;
}
}
- msec = simple_strtoul(buf, &e, 10);
- if (e == buf || (*e && *e != '\n'))
+ if (strict_strtoul(buf, 10, &msec) < 0)
return -EINVAL;
msec = (msec * 1000) / scale;
if (msec == 0)
mddev->safemode_delay = 0;
else {
+ unsigned long old_delay = mddev->safemode_delay;
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
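+		/* a shorter delay must take effect immediately,
+		 * not when the old, longer timer finally fires
+		 */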
+ if (mddev->safemode_delay < old_delay)
+ md_safemode_timeout((unsigned long)mddev);
}
return len;
}
break;
case clear:
/* stopping an active array */
- if (atomic_read(&mddev->active) > 1)
+ if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
err = do_md_stop(mddev, 0, 0);
break;
case inactive:
/* stopping an active array */
if (mddev->pers) {
- if (atomic_read(&mddev->active) > 1)
+ if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
err = do_md_stop(mddev, 2, 0);
} else
break;
case read_auto:
if (mddev->pers) {
- if (mddev->ro != 1)
+ if (mddev->ro == 0)
err = do_md_stop(mddev, 1, 0);
- else
+ else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
if (err)
return err;
else {
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify_dirent(mddev->sysfs_state);
return len;
}
}
{
int major, minor;
char *e;
- if (!list_empty(&mddev->disks))
+ /* Changing the details of 'external' metadata is
+ * always permitted. Otherwise there must be
+ * no devices attached to the array.
+ */
+ if (mddev->external && strncmp(buf, "external:", 9) == 0)
+ ;
+ else if (!list_empty(&mddev->disks))
return -EBUSY;
if (cmd_match(buf, "none")) {
disk->fops = &md_fops;
disk->private_data = mddev;
disk->queue = mddev->queue;
+ /* Allow extended partitions. This makes the
+	 * 'mdp' device redundant, but we can't really
+	 * remove it now.
+ */
+ disk->flags |= GENHD_FL_EXT_DEVT;
add_disk(disk);
mddev->gendisk = disk;
- error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
- "%s", "md");
+ error = kobject_init_and_add(&mddev->kobj, &md_ktype,
+ &disk_to_dev(disk)->kobj, "%s", "md");
mutex_unlock(&disks_mutex);
if (error)
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
- else
+ else {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
+ mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
+ }
return NULL;
}
if (!atomic_read(&mddev->writes_pending)) {
mddev->safemode = 1;
if (mddev->external)
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify_dirent(mddev->sysfs_state);
}
md_wakeup_thread(mddev->thread);
}
return -EINVAL;
}
/*
- * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
+ * chunk-size has to be a power of 2
*/
if ( (1 << ffz(~chunk_size)) != chunk_size) {
printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
return -EINVAL;
}
- if (chunk_size < PAGE_SIZE) {
- printk(KERN_ERR "too small chunk_size: %d < %ld\n",
- chunk_size, PAGE_SIZE);
- return -EINVAL;
- }
/* devices must have minimum size of one chunk */
rdev_for_each(rdev, tmp, mddev) {
}
}
-#ifdef CONFIG_KMOD
if (mddev->level != LEVEL_NONE)
request_module("md-level-%d", mddev->level);
else if (mddev->clevel[0])
request_module("md-%s", mddev->clevel);
-#endif
/*
* Drop all container device buffers, from now on
return -EINVAL;
}
}
- sysfs_notify(&rdev->kobj, NULL, "state");
+ sysfs_notify_dirent(rdev->sysfs_state);
}
md_probe(mddev->unit, NULL, NULL);
mddev->changed = 1;
md_new_event(mddev);
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify_dirent(mddev->sysfs_state);
sysfs_notify(&mddev->kobj, NULL, "sync_action");
sysfs_notify(&mddev->kobj, NULL, "degraded");
- kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
return 0;
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify_dirent(mddev->sysfs_state);
return 0;
}
int err = 0;
struct gendisk *disk = mddev->gendisk;
- if (atomic_read(&mddev->active) > 1 + is_open) {
+ if (atomic_read(&mddev->openers) > is_open) {
printk("md: %s still in use.\n",mdname(mddev));
return -EBUSY;
}
del_timer_sync(&mddev->safemode_timer);
- invalidate_partition(disk, 0);
-
switch(mode) {
case 1: /* readonly */
err = -ENXIO;
module_put(mddev->pers->owner);
mddev->pers = NULL;
/* tell userspace to handle 'inactive' */
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify_dirent(mddev->sysfs_state);
set_capacity(disk, 0);
mddev->changed = 1;
mddev->degraded = 0;
mddev->barriers_work = 0;
mddev->safemode = 0;
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
} else if (mddev->pers)
printk(KERN_INFO "md: %s switched to read-only mode.\n",
mdname(mddev));
err = 0;
md_new_event(mddev);
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify_dirent(mddev->sysfs_state);
out:
return err;
}
}
printk("\n");
- err = do_md_run (mddev);
+ err = do_md_run(mddev);
if (err) {
printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
- do_md_stop (mddev, 0, 0);
+ do_md_stop(mddev, 0, 0);
}
}
/* on success, candidates will be empty, on error
* it won't...
*/
- rdev_for_each_list(rdev, tmp, candidates)
+ rdev_for_each_list(rdev, tmp, candidates) {
+ list_del_init(&rdev->same_set);
export_rdev(rdev);
+ }
mddev_put(mddev);
}
printk(KERN_INFO "md: ... autorun DONE.\n");
if (err)
export_rdev(rdev);
else
- sysfs_notify(&rdev->kobj, NULL, "state");
+ sysfs_notify_dirent(rdev->sysfs_state);
md_update_sb(mddev, 1);
if (mddev->degraded)
if (!(info->state & (1<<MD_DISK_FAULTY))) {
int err;
- rdev = md_import_device (dev, -1, 0);
+ rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
return -EINVAL;
}
- rdev = md_import_device (dev, -1, 0);
+ rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
*/
if (mddev->sync_thread)
return -EBUSY;
+ if (mddev->bitmap)
+		/* Sorry, we cannot grow a bitmap yet; remove it,
+		 * grow the array, then re-add the bitmap.
+		 */
+ return -EBUSY;
rdev_for_each(rdev, tmp, mddev) {
sector_t avail;
avail = rdev->size * 2;
return 0;
}
-static int md_ioctl(struct inode *inode, struct file *file,
+static int md_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int err = 0;
* Commands creating/starting a new array:
*/
- mddev = inode->i_bdev->bd_disk->private_data;
+ mddev = bdev->bd_disk->private_data;
if (!mddev) {
BUG();
goto done_unlock;
case STOP_ARRAY:
- err = do_md_stop (mddev, 0, 1);
+ err = do_md_stop(mddev, 0, 1);
goto done_unlock;
case STOP_ARRAY_RO:
- err = do_md_stop (mddev, 1, 1);
+ err = do_md_stop(mddev, 1, 1);
goto done_unlock;
}
if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
if (mddev->ro == 2) {
mddev->ro = 0;
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify_dirent(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
} else {
goto done_unlock;
case RUN_ARRAY:
- err = do_md_run (mddev);
+ err = do_md_run(mddev);
goto done_unlock;
case SET_BITMAP_FILE:
return err;
}
-static int md_open(struct inode *inode, struct file *file)
+static int md_open(struct block_device *bdev, fmode_t mode)
{
/*
* Succeed if we can lock the mddev, which confirms that
* it isn't being stopped right now.
*/
- mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
+ mddev_t *mddev = bdev->bd_disk->private_data;
int err;
if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
err = 0;
mddev_get(mddev);
+ atomic_inc(&mddev->openers);
mddev_unlock(mddev);
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
out:
return err;
}
-static int md_release(struct inode *inode, struct file * file)
+static int md_release(struct gendisk *disk, fmode_t mode)
{
- mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
+ mddev_t *mddev = disk->private_data;
BUG_ON(!mddev);
+ atomic_dec(&mddev->openers);
mddev_put(mddev);
return 0;
.owner = THIS_MODULE,
.open = md_open,
.release = md_release,
- .ioctl = md_ioctl,
+ .locked_ioctl = md_ioctl,
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
seq_printf(seq, " super non-persistent");
if (mddev->pers) {
- mddev->pers->status (seq, mddev);
+ mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
if (mddev->curr_resync > 2) {
- status_resync (seq, mddev);
+ status_resync(seq, mddev);
seq_printf(seq, "\n ");
} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
seq_printf(seq, "\tresync=DELAYED\n ");
static int is_mddev_idle(mddev_t *mddev)
{
mdk_rdev_t * rdev;
- struct list_head *tmp;
int idle;
long curr_events;
idle = 1;
- rdev_for_each(rdev, tmp, mddev) {
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
- curr_events = disk_stat_read(disk, sectors[0]) +
- disk_stat_read(disk, sectors[1]) -
+ curr_events = part_stat_read(&disk->part0, sectors[0]) +
+ part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
idle = 0;
}
}
+ rcu_read_unlock();
return idle;
}
spin_unlock_irq(&mddev->write_lock);
}
if (did_change)
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify_dirent(mddev->sysfs_state);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
mddev->safemode = 1;
spin_unlock_irq(&mddev->write_lock);
md_update_sb(mddev, 0);
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify_dirent(mddev->sysfs_state);
} else
spin_unlock_irq(&mddev->write_lock);
* time 'round when curr_resync == 2
*/
continue;
- prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+ /* We need to wait 'interruptible' so as not to
+ * contribute to the load average, and not to
+ * be caught by 'softlockup'
+ */
+ prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
" share one or more physical units)\n",
desc, mdname(mddev), mdname(mddev2));
mddev_put(mddev2);
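+		/* an interruptible wait returns at once while a signal
+		 * is pending, so clear any before sleeping
+		 */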
+ if (signal_pending(current))
+ flush_signals(current);
schedule();
finish_wait(&resync_wait, &wq);
goto try_again;
}
}
- if (mddev->degraded) {
+	if (mddev->degraded && !mddev->ro) {
rdev_for_each(rdev, rtmp, mddev) {
if (rdev->raid_disk >= 0 &&
- !test_bit(In_sync, &rdev->flags))
+ !test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Blocked, &rdev->flags))
spares++;
if (rdev->raid_disk < 0
&& !test_bit(Faulty, &rdev->flags)) {
flush_signals(current);
}
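+	/* read-only arrays are only serviced when MD_RECOVERY_NEEDED
+	 * was set explicitly, e.g. to remove a failed device
+	 */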
+ if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return;
if ( ! (
(mddev->flags && !mddev->external) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
if (mddev_trylock(mddev)) {
int spares = 0;
+ if (mddev->ro) {
+			/* The only thing we do on a read-only
+			 * array is remove failed devices.
+ */
+ remove_and_add_spares(mddev);
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ goto unlock;
+ }
+
if (!mddev->external) {
int did_change = 0;
spin_lock_irq(&mddev->write_lock);
mddev->safemode = 0;
spin_unlock_irq(&mddev->write_lock);
if (did_change)
- sysfs_notify(&mddev->kobj, NULL, "array_state");
+ sysfs_notify_dirent(mddev->sysfs_state);
}
if (mddev->flags)
rdev_for_each(rdev, rtmp, mddev)
if (test_and_clear_bit(StateChanged, &rdev->flags))
- sysfs_notify(&rdev->kobj, NULL, "state");
+ sysfs_notify_dirent(rdev->sysfs_state);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
/* resync has finished, collect result */
md_unregister_thread(mddev->sync_thread);
mddev->sync_thread = NULL;
- if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev))
} else if ((spares = remove_and_add_spares(mddev))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
- sysfs_notify(&rdev->kobj, NULL, "state");
+ sysfs_notify_dirent(rdev->sysfs_state);
wait_event_timeout(rdev->blocked_wait,
!test_bit(Blocked, &rdev->flags),
msecs_to_jiffies(5000));
for_each_mddev(mddev, tmp)
if (mddev_trylock(mddev)) {
- do_md_stop (mddev, 1, 0);
+		/* Force a switch to readonly even if the array
+ * appears to still be in use. Hence
+ * the '100'.
+ */
+ do_md_stop(mddev, 1, 100);
mddev_unlock(mddev);
}
/*
raid_table_header = register_sysctl_table(raid_root_table);
md_geninit();
- return (0);
+ return 0;
}