dm mpath: hold io until all pg_inits completed
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 93287f8..a20a71e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
 #include <linux/buffer_head.h> /* for invalidate_bdev */
 #include <linux/poll.h>
 #include <linux/ctype.h>
+#include <linux/string.h>
 #include <linux/hdreg.h>
 #include <linux/proc_fs.h>
 #include <linux/random.h>
 #include <linux/reboot.h>
 #include <linux/file.h>
+#include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/raid/md_p.h>
 #include <linux/raid/md_u.h>
@@ -68,6 +70,12 @@ static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
 
 /*
+ * Default number of read corrections we'll attempt on an rdev
+ * before ejecting it from the array. We divide the read error
+ * count by 2 for every hour elapsed between read errors.
+ */
+#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
+/*
  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
  * is 1000 KB/sec, so the extra system load does not show up that much.
  * Increase it if you want to have more _guaranteed_ speed. Note that
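
(Illustrative aside, not part of the patch: the new comment above describes a halve-per-hour decay of the corrected-read-error count. Below is a rough sketch of that rule, using the last_read_error timestamp this series adds to mdk_rdev_t; the function name is invented here, and the real accounting lives in the raid personalities rather than in md.c itself.)

static void decay_read_errors_sketch(mdk_rdev_t *rdev)
{
	struct timespec cur_time_mon;
	long hours_since_last;
	unsigned int read_errors = atomic_read(&rdev->read_errors);

	ktime_get_ts(&cur_time_mon);

	if (rdev->last_read_error.tv_sec == 0 &&
	    rdev->last_read_error.tv_nsec == 0) {
		/* first error on this device: just record when it happened */
		rdev->last_read_error = cur_time_mon;
		return;
	}

	hours_since_last = (cur_time_mon.tv_sec -
			    rdev->last_read_error.tv_sec) / 3600;
	rdev->last_read_error = cur_time_mon;

	/*
	 * Halve the stored count once per elapsed hour; after enough idle
	 * hours the count decays all the way to zero, so occasional errors
	 * never accumulate towards the ejection threshold.
	 */
	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);
	else
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}
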
@@ -378,7 +386,9 @@ static void mddev_put(mddev_t *mddev)
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks) &&
-           !mddev->hold_active) {
+           mddev->ctime == 0 && !mddev->hold_active) {
+               /* Array is not configured at all, and not held active,
+                * so destroy it */
                list_del(&mddev->all_mddevs);
                if (mddev->gendisk) {
                        /* we did a probe so need to clean up.
@@ -1501,12 +1511,10 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 
        if (rdev->raid_disk >= 0 &&
            !test_bit(In_sync, &rdev->flags)) {
-               if (rdev->recovery_offset > 0) {
-                       sb->feature_map |=
-                               cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
-                       sb->recovery_offset =
-                               cpu_to_le64(rdev->recovery_offset);
-               }
+               sb->feature_map |=
+                       cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
+               sb->recovery_offset =
+                       cpu_to_le64(rdev->recovery_offset);
        }
 
        if (mddev->reshape_position != MaxSector) {
@@ -1540,7 +1548,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
                else if (test_bit(In_sync, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
-               else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
+               else if (rdev2->raid_disk >= 0)
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else
                        sb->dev_roles[i] = cpu_to_le16(0xffff);
@@ -1930,15 +1938,11 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
 
        uuid = sb->set_uuid;
        printk(KERN_INFO
-              "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
-              ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
+              "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
               "md:    Name: \"%s\" CT:%llu\n",
                le32_to_cpu(sb->major_version),
                le32_to_cpu(sb->feature_map),
-               uuid[0], uuid[1], uuid[2], uuid[3],
-               uuid[4], uuid[5], uuid[6], uuid[7],
-               uuid[8], uuid[9], uuid[10], uuid[11],
-               uuid[12], uuid[13], uuid[14], uuid[15],
+               uuid,
                sb->set_name,
                (unsigned long long)le64_to_cpu(sb->ctime)
                       & MD_SUPERBLOCK_1_TIME_SEC_MASK);
@@ -1947,8 +1951,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
        printk(KERN_INFO
               "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
                        " RO:%llu\n"
-              "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
-                       ":%02x%02x%02x%02x%02x%02x\n"
+              "md:     Dev:%08x UUID: %pU\n"
               "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
               "md:         (MaxDev:%u) \n",
                le32_to_cpu(sb->level),
@@ -1961,10 +1964,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
                (unsigned long long)le64_to_cpu(sb->super_offset),
                (unsigned long long)le64_to_cpu(sb->recovery_offset),
                le32_to_cpu(sb->dev_number),
-               uuid[0], uuid[1], uuid[2], uuid[3],
-               uuid[4], uuid[5], uuid[6], uuid[7],
-               uuid[8], uuid[9], uuid[10], uuid[11],
-               uuid[12], uuid[13], uuid[14], uuid[15],
+               uuid,
                sb->devflags,
                (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
                (unsigned long long)le64_to_cpu(sb->events),
@@ -2546,12 +2546,49 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 static struct rdev_sysfs_entry rdev_size =
 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
 
+
+static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
+{
+       unsigned long long recovery_start = rdev->recovery_offset;
+
+       if (test_bit(In_sync, &rdev->flags) ||
+           recovery_start == MaxSector)
+               return sprintf(page, "none\n");
+
+       return sprintf(page, "%llu\n", recovery_start);
+}
+
+static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+       unsigned long long recovery_start;
+
+       if (cmd_match(buf, "none"))
+               recovery_start = MaxSector;
+       else if (strict_strtoull(buf, 10, &recovery_start))
+               return -EINVAL;
+
+       if (rdev->mddev->pers &&
+           rdev->raid_disk >= 0)
+               return -EBUSY;
+
+       rdev->recovery_offset = recovery_start;
+       if (recovery_start == MaxSector)
+               set_bit(In_sync, &rdev->flags);
+       else
+               clear_bit(In_sync, &rdev->flags);
+       return len;
+}
+
+static struct rdev_sysfs_entry rdev_recovery_start =
+__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
+
 static struct attribute *rdev_default_attrs[] = {
        &rdev_state.attr,
        &rdev_errors.attr,
        &rdev_slot.attr,
        &rdev_offset.attr,
        &rdev_size.attr,
+       &rdev_recovery_start.attr,
        NULL,
 };
 static ssize_t
@@ -2653,6 +2690,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
        rdev->flags = 0;
        rdev->data_offset = 0;
        rdev->sb_events = 0;
+       rdev->last_read_error.tv_sec  = 0;
+       rdev->last_read_error.tv_nsec = 0;
        atomic_set(&rdev->nr_pending, 0);
        atomic_set(&rdev->read_errors, 0);
        atomic_set(&rdev->corrected_errors, 0);
@@ -3094,7 +3133,9 @@ resync_start_store(mddev_t *mddev, const char *buf, size_t len)
 
        if (mddev->pers)
                return -EBUSY;
-       if (!*buf || (*e && *e != '\n'))
+       if (cmd_match(buf, "none"))
+               n = MaxSector;
+       else if (!*buf || (*e && *e != '\n'))
                return -EINVAL;
 
        mddev->recovery_cp = n;
@@ -3290,6 +3331,29 @@ static struct md_sysfs_entry md_array_state =
 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
 
 static ssize_t
+max_corrected_read_errors_show(mddev_t *mddev, char *page) {
+       return sprintf(page, "%d\n",
+                      atomic_read(&mddev->max_corr_read_errors));
+}
+
+static ssize_t
+max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       char *e;
+       unsigned long n = simple_strtoul(buf, &e, 10);
+
+       if (*buf && (*e == 0 || *e == '\n')) {
+               atomic_set(&mddev->max_corr_read_errors, n);
+               return len;
+       }
+       return -EINVAL;
+}
+
+static struct md_sysfs_entry max_corr_read_errors =
+__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
+       max_corrected_read_errors_store);
+
+static ssize_t
 null_show(mddev_t *mddev, char *page)
 {
        return -EINVAL;
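
(Illustrative aside, not part of the patch: a sketch of how a raid personality might consult the per-array limit exposed above as "max_read_errors" before attempting a read repair. The function name is invented for this example; md_error(), mdname() and the atomic counters are existing md interfaces.)

static int read_error_budget_exceeded(mddev_t *mddev, mdk_rdev_t *rdev)
{
	unsigned int max_read_errors =
		atomic_read(&mddev->max_corr_read_errors);

	if (atomic_read(&rdev->read_errors) > max_read_errors) {
		/* budget already spent: fail the device instead of fixing */
		printk(KERN_NOTICE "md: %s: corrected read error limit"
		       " reached [cur %d : max %u]\n",
		       mdname(mddev),
		       atomic_read(&rdev->read_errors), max_read_errors);
		md_error(mddev, rdev);
		return 1;
	}
	atomic_inc(&rdev->read_errors);
	return 0;
}
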
@@ -3370,8 +3434,7 @@ bitmap_store(mddev_t *mddev, const char *buf, size_t len)
                }
                if (*end && !isspace(*end)) break;
                bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
-               buf = end;
-               while (isspace(*buf)) buf++;
+               buf = skip_spaces(end);
        }
        bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
 out:
@@ -3914,6 +3977,7 @@ static struct attribute *md_default_attrs[] = {
        &md_array_state.attr,
        &md_reshape_position.attr,
        &md_array_size.attr,
+       &max_corr_read_errors.attr,
        NULL,
 };
 
@@ -4011,13 +4075,16 @@ static void mddev_delayed_delete(struct work_struct *ws)
 {
        mddev_t *mddev = container_of(ws, mddev_t, del_work);
 
-       if (mddev->private == &md_redundancy_group) {
+       if (mddev->private) {
                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+               if (mddev->private != (void*)1)
+                       sysfs_remove_group(&mddev->kobj, mddev->private);
                if (mddev->sysfs_action)
                        sysfs_put(mddev->sysfs_action);
                mddev->sysfs_action = NULL;
                mddev->private = NULL;
        }
+       sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
        kobject_del(&mddev->kobj);
        kobject_put(&mddev->kobj);
 }
@@ -4109,6 +4176,8 @@ static int md_alloc(dev_t dev, char *name)
                       disk->disk_name);
                error = 0;
        }
+       if (sysfs_create_group(&mddev->kobj, &md_bitmap_group))
+               printk(KERN_DEBUG "pointless warning\n");
  abort:
        mutex_unlock(&disks_mutex);
        if (!error) {
@@ -4220,10 +4289,7 @@ static int do_md_run(mddev_t * mddev)
                sysfs_notify_dirent(rdev->sysfs_state);
        }
 
-       md_probe(mddev->unit, NULL, NULL);
        disk = mddev->gendisk;
-       if (!disk)
-               return -ENOMEM;
 
        spin_lock(&pers_lock);
        pers = find_pers(mddev->level, mddev->clevel);
@@ -4290,7 +4356,7 @@ static int do_md_run(mddev_t * mddev)
        mddev->barriers_work = 1;
        mddev->ok_start_degraded = start_dirty_degraded;
 
-       if (start_readonly)
+       if (start_readonly && mddev->ro == 0)
                mddev->ro = 2; /* read-only, but switch on first write */
 
        err = mddev->pers->run(mddev);
@@ -4330,6 +4396,8 @@ static int do_md_run(mddev_t * mddev)
                mddev->ro = 0;
 
        atomic_set(&mddev->writes_pending,0);
+       atomic_set(&mddev->max_corr_read_errors,
+                  MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
        mddev->safemode = 0;
        mddev->safemode_timer.function = md_safemode_timeout;
        mddev->safemode_timer.data = (unsigned long) mddev;
@@ -4352,33 +4420,6 @@ static int do_md_run(mddev_t * mddev)
 
        set_capacity(disk, mddev->array_sectors);
 
-       /* If there is a partially-recovered drive we need to
-        * start recovery here.  If we leave it to md_check_recovery,
-        * it will remove the drives and not do the right thing
-        */
-       if (mddev->degraded && !mddev->sync_thread) {
-               int spares = 0;
-               list_for_each_entry(rdev, &mddev->disks, same_set)
-                       if (rdev->raid_disk >= 0 &&
-                           !test_bit(In_sync, &rdev->flags) &&
-                           !test_bit(Faulty, &rdev->flags))
-                               /* complete an interrupted recovery */
-                               spares++;
-               if (spares && mddev->pers->sync_request) {
-                       mddev->recovery = 0;
-                       set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-                       mddev->sync_thread = md_register_thread(md_do_sync,
-                                                               mddev,
-                                                               "resync");
-                       if (!mddev->sync_thread) {
-                               printk(KERN_ERR "%s: could not start resync"
-                                      " thread...\n",
-                                      mdname(mddev));
-                               /* leave the spares where they are, it shouldn't hurt */
-                               mddev->recovery = 0;
-                       }
-               }
-       }
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 
@@ -4434,7 +4475,7 @@ static int deny_bitmap_write_access(struct file * file)
        return 0;
 }
 
-static void restore_bitmap_write_access(struct file *file)
+void restore_bitmap_write_access(struct file *file)
 {
        struct inode *inode = file->f_mapping->host;
 
@@ -4488,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
                        mddev->queue->unplug_fn = NULL;
                        mddev->queue->backing_dev_info.congested_fn = NULL;
                        module_put(mddev->pers->owner);
-                       if (mddev->pers->sync_request)
-                               mddev->private = &md_redundancy_group;
+                       if (mddev->pers->sync_request && mddev->private == NULL)
+                               mddev->private = (void*)1;
                        mddev->pers = NULL;
                        /* tell userspace to handle 'inactive' */
                        sysfs_notify_dirent(mddev->sysfs_state);
@@ -4536,9 +4577,6 @@ out:
                }
                mddev->bitmap_info.offset = 0;
 
-               /* make sure all md_delayed_delete calls have finished */
-               flush_scheduled_work();
-
                export_array(mddev);
 
                mddev->array_sectors = 0;
@@ -5195,6 +5233,10 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
                mddev->minor_version = info->minor_version;
                mddev->patch_version = info->patch_version;
                mddev->persistent = !info->not_persistent;
+               /* ensure mddev_put doesn't delete this now that there
+                * is some minimal configuration.
+                */
+               mddev->ctime         = get_seconds();
                return 0;
        }
        mddev->major_version = MD_MAJOR_VERSION;
@@ -5654,6 +5696,25 @@ done:
 abort:
        return err;
 }
+#ifdef CONFIG_COMPAT
+static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
+                   unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case HOT_REMOVE_DISK:
+       case HOT_ADD_DISK:
+       case SET_DISK_FAULTY:
+       case SET_BITMAP_FILE:
+               /* These take in integer arg, do not convert */
+               break;
+       default:
+               arg = (unsigned long)compat_ptr(arg);
+               break;
+       }
+
+       return md_ioctl(bdev, mode, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
 
 static int md_open(struct block_device *bdev, fmode_t mode)
 {
@@ -5719,6 +5780,9 @@ static const struct block_device_operations md_fops =
        .open           = md_open,
        .release        = md_release,
        .ioctl          = md_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = md_compat_ioctl,
+#endif
        .getgeo         = md_getgeo,
        .media_changed  = md_media_changed,
        .revalidate_disk= md_revalidate,
@@ -6405,10 +6469,11 @@ void md_do_sync(mddev_t *mddev)
                mddev->curr_resync = 2;
 
        try_again:
-               if (kthread_should_stop()) {
+               if (kthread_should_stop())
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
+               if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        goto skip;
-               }
                for_each_mddev(mddev2, tmp) {
                        if (mddev2 == mddev)
                                continue;
@@ -6468,12 +6533,14 @@ void md_do_sync(mddev_t *mddev)
                /* recovery follows the physical size of devices */
                max_sectors = mddev->dev_sectors;
                j = MaxSector;
-               list_for_each_entry(rdev, &mddev->disks, same_set)
+               rcu_read_lock();
+               list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(Faulty, &rdev->flags) &&
                            !test_bit(In_sync, &rdev->flags) &&
                            rdev->recovery_offset < j)
                                j = rdev->recovery_offset;
+               rcu_read_unlock();
        }
 
        printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
@@ -6643,12 +6710,14 @@ void md_do_sync(mddev_t *mddev)
                } else {
                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                                mddev->curr_resync = MaxSector;
-                       list_for_each_entry(rdev, &mddev->disks, same_set)
+                       rcu_read_lock();
+                       list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
                                if (rdev->raid_disk >= 0 &&
                                    !test_bit(Faulty, &rdev->flags) &&
                                    !test_bit(In_sync, &rdev->flags) &&
                                    rdev->recovery_offset < mddev->curr_resync)
                                        rdev->recovery_offset = mddev->curr_resync;
+                       rcu_read_unlock();
                }
        }
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -6726,6 +6795,7 @@ static int remove_and_add_spares(mddev_t *mddev)
                                                       nm, mdname(mddev));
                                        spares++;
                                        md_new_event(mddev);
+                                       set_bit(MD_CHANGE_DEVS, &mddev->flags);
                                } else
                                        break;
                        }
@@ -7131,5 +7201,6 @@ EXPORT_SYMBOL(md_unregister_thread);
 EXPORT_SYMBOL(md_wakeup_thread);
 EXPORT_SYMBOL(md_check_recovery);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD RAID framework");
 MODULE_ALIAS("md");
 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);