md: Add support for Raid0->Raid5 takeover
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dcce204..bb28fd6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -52,6 +52,7 @@
 #include <linux/cpu.h>
 #include "md.h"
 #include "raid5.h"
+#include "raid0.h"
 #include "bitmap.h"
 
 /*
@@ -1618,7 +1619,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        char b[BDEVNAME_SIZE];
-       raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+       raid5_conf_t *conf = mddev->private;
        pr_debug("raid5: error called\n");
 
        if (!test_bit(Faulty, &rdev->flags)) {
@@ -2947,6 +2948,7 @@ static void handle_stripe5(struct stripe_head *sh)
        struct r5dev *dev;
        mdk_rdev_t *blocked_rdev = NULL;
        int prexor;
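+       /* Set when STRIPE_PREREAD_ACTIVE is cleared; the matching
+        * atomic_dec of preread_active_stripes is deferred until
+        * after ops_run_io, see below.
+        */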
+       int dec_preread_active = 0;
 
        memset(&s, 0, sizeof(s));
        pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
@@ -3096,12 +3098,8 @@ static void handle_stripe5(struct stripe_head *sh)
                                        set_bit(STRIPE_INSYNC, &sh->state);
                        }
                }
-               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-                       atomic_dec(&conf->preread_active_stripes);
-                       if (atomic_read(&conf->preread_active_stripes) <
-                               IO_THRESHOLD)
-                               md_wakeup_thread(conf->mddev->thread);
-               }
+               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+                       dec_preread_active = 1;
        }
 
        /* Now to consider new write requests and what else, if anything
@@ -3208,6 +3206,16 @@ static void handle_stripe5(struct stripe_head *sh)
 
        ops_run_io(sh, &s);
 
+       if (dec_preread_active) {
+               /* We delay this until after ops_run_io so that if make_request
+                * is waiting on a barrier, it won't continue until the writes
+                * have actually been submitted.
+                */
+               atomic_dec(&conf->preread_active_stripes);
+               if (atomic_read(&conf->preread_active_stripes) <
+                   IO_THRESHOLD)
+                       md_wakeup_thread(conf->mddev->thread);
+       }
        return_io(return_bi);
 }
 
@@ -3221,6 +3229,7 @@ static void handle_stripe6(struct stripe_head *sh)
        struct r6_state r6s;
        struct r5dev *dev, *pdev, *qdev;
        mdk_rdev_t *blocked_rdev = NULL;
+       int dec_preread_active = 0;
 
        pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
                "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
@@ -3358,7 +3367,6 @@ static void handle_stripe6(struct stripe_head *sh)
         * completed
         */
        if (sh->reconstruct_state == reconstruct_state_drain_result) {
-               int qd_idx = sh->qd_idx;
 
                sh->reconstruct_state = reconstruct_state_idle;
                /* All the 'written' buffers and the parity blocks are ready to
@@ -3380,12 +3388,8 @@ static void handle_stripe6(struct stripe_head *sh)
                                        set_bit(STRIPE_INSYNC, &sh->state);
                        }
                }
-               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-                       atomic_dec(&conf->preread_active_stripes);
-                       if (atomic_read(&conf->preread_active_stripes) <
-                               IO_THRESHOLD)
-                               md_wakeup_thread(conf->mddev->thread);
-               }
+               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+                       dec_preread_active = 1;
        }
 
        /* Now to consider new write requests and what else, if anything
@@ -3494,6 +3498,18 @@ static void handle_stripe6(struct stripe_head *sh)
 
        ops_run_io(sh, &s);
 
+       if (dec_preread_active) {
+               /* We delay this until after ops_run_io so that if make_request
+                * is waiting on a barrier, it won't continue until the writes
+                * have actually been submitted.
+                */
+               atomic_dec(&conf->preread_active_stripes);
+               if (atomic_read(&conf->preread_active_stripes) <
+                   IO_THRESHOLD)
+                       md_wakeup_thread(conf->mddev->thread);
+       }
+
        return_io(return_bi);
 }
 
@@ -3724,7 +3740,7 @@ static int bio_fits_rdev(struct bio *bi)
        if ((bi->bi_size>>9) > queue_max_sectors(q))
                return 0;
        blk_recount_segments(q, bi);
-       if (bi->bi_phys_segments > queue_max_phys_segments(q))
+       if (bi->bi_phys_segments > queue_max_segments(q))
                return 0;
 
        if (q->merge_bvec_fn)
@@ -3741,7 +3757,7 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
        mddev_t *mddev = q->queuedata;
        raid5_conf_t *conf = mddev->private;
-       unsigned int dd_idx;
+       int dd_idx;
        struct bio* align_bi;
        mdk_rdev_t *rdev;
 
@@ -3866,7 +3882,13 @@ static int make_request(struct request_queue *q, struct bio * bi)
        int cpu, remaining;
 
        if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
-               bio_endio(bi, -EOPNOTSUPP);
+               /* Drain all pending writes.  We only really need
+                * to ensure they have been submitted, but this is
+                * easier.
+                */
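+               /* raid5_quiesce(mddev, 1) waits for all active stripes
+                * to drain; quiesce(mddev, 0) then resumes normal I/O.
+                */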
+               mddev->pers->quiesce(mddev, 1);
+               mddev->pers->quiesce(mddev, 0);
+               md_barrier_request(mddev, bi);
                return 0;
        }
 
@@ -3990,6 +4012,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
                        finish_wait(&conf->wait_for_overlap, &w);
                        set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
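+                       /* For a barrier request, count each stripe as
+                        * preread-active so the wait below can tell
+                        * when every write has been submitted.
+                        */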
+                       if (mddev->barrier &&
+                           !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+                               atomic_inc(&conf->preread_active_stripes);
                        release_stripe(sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
@@ -4009,6 +4034,14 @@ static int make_request(struct request_queue *q, struct bio * bi)
 
                bio_endio(bi, 0);
        }
+
+       if (mddev->barrier) {
+               /* We need to wait for the stripes to all be handled.
+                * So: wait for preread_active_stripes to drop to 0.
+                */
+               wait_event(mddev->thread->wqueue,
+                          atomic_read(&conf->preread_active_stripes) == 0);
+       }
        return 0;
 }
 
@@ -4025,7 +4058,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
         * As the reads complete, handle_stripe will copy the data
         * into the destination stripe and release that stripe.
         */
-       raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+       raid5_conf_t *conf = mddev->private;
        struct stripe_head *sh;
        sector_t first_sector, last_sector;
        int raid_disks = conf->previous_raid_disks;
@@ -4234,7 +4267,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
 /* FIXME go_faster isn't used */
 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 {
-       raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+       raid5_conf_t *conf = mddev->private;
        struct stripe_head *sh;
        sector_t max_sector = mddev->dev_sectors;
        int sync_blocks;
@@ -4648,7 +4681,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
 {
        unsigned long cpu;
        struct page *spare_page;
-       struct raid5_percpu *allcpus;
+       struct raid5_percpu __percpu *allcpus;
        void *scribble;
        int err;
 
@@ -4823,11 +4856,40 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
                return ERR_PTR(-ENOMEM);
 }
 
+
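+/* Return 1 if the device at slot 'raid_disk' holds only parity blocks
+ * under layout 'algo'.  Such a device being un-synced does not make a
+ * dirty array unsafe to start.
+ */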
+static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
+{
+       switch (algo) {
+       case ALGORITHM_PARITY_0:
+               if (raid_disk < max_degraded)
+                       return 1;
+               break;
+       case ALGORITHM_PARITY_N:
+               if (raid_disk >= raid_disks - max_degraded)
+                       return 1;
+               break;
+       case ALGORITHM_PARITY_0_6:
+               if (raid_disk == 0 ||
+                   raid_disk == raid_disks - 1)
+                       return 1;
+               break;
+       case ALGORITHM_LEFT_ASYMMETRIC_6:
+       case ALGORITHM_RIGHT_ASYMMETRIC_6:
+       case ALGORITHM_LEFT_SYMMETRIC_6:
+       case ALGORITHM_RIGHT_SYMMETRIC_6:
+               if (raid_disk == raid_disks - 1)
+                       return 1;
+       }
+       return 0;
+}
+
 static int run(mddev_t *mddev)
 {
        raid5_conf_t *conf;
        int working_disks = 0, chunk_size;
+       int dirty_parity_disks = 0;
        mdk_rdev_t *rdev;
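+       /* Per-device sector that an in-progress reshape has reached
+        * (in the new layout); 0 when no reshape is pending.
+        */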
+       sector_t reshape_offset = 0;
 
        if (mddev->recovery_cp != MaxSector)
                printk(KERN_NOTICE "raid5: %s is not clean"
@@ -4861,6 +4923,7 @@ static int run(mddev_t *mddev)
                               "on a stripe boundary\n");
                        return -EINVAL;
                }
+               reshape_offset = here_new * mddev->new_chunk_sectors;
                /* here_new is the stripe we will write to */
                here_old = mddev->reshape_position;
                sector_div(here_old, mddev->chunk_sectors *
@@ -4916,10 +4979,51 @@ static int run(mddev_t *mddev)
        /*
         * 0 for a fully functional array, 1 or 2 for a degraded array.
         */
-       list_for_each_entry(rdev, &mddev->disks, same_set)
-               if (rdev->raid_disk >= 0 &&
-                   test_bit(In_sync, &rdev->flags))
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
+               if (rdev->raid_disk < 0)
+                       continue;
+               if (test_bit(In_sync, &rdev->flags))
                        working_disks++;
+               /* This disc is not fully in-sync.  However if it
+                * just stored parity (beyond the recovery_offset),
+                * then we don't need to be concerned about the
+                * array being dirty.
+                * When reshape goes 'backwards', we never have
+                * partially completed devices, so we only need
+                * to worry about reshape going forwards.
+                */
+               /* Hack because v0.91 doesn't store recovery_offset properly. */
+               if (mddev->major_version == 0 &&
+                   mddev->minor_version > 90)
+                       rdev->recovery_offset = reshape_offset;
+
+               printk(KERN_DEBUG
+                      "%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
+                      rdev->raid_disk, working_disks, conf->prev_algo,
+                      conf->previous_raid_disks, conf->max_degraded,
+                      conf->algorithm, conf->raid_disks,
+                      only_parity(rdev->raid_disk,
+                                  conf->prev_algo,
+                                  conf->previous_raid_disks,
+                                  conf->max_degraded),
+                      only_parity(rdev->raid_disk,
+                                  conf->algorithm,
+                                  conf->raid_disks,
+                                  conf->max_degraded));
+               if (rdev->recovery_offset < reshape_offset) {
+                       /* We need to check old and new layout */
+                       if (!only_parity(rdev->raid_disk,
+                                        conf->algorithm,
+                                        conf->raid_disks,
+                                        conf->max_degraded))
+                               continue;
+               }
+               if (!only_parity(rdev->raid_disk,
+                                conf->prev_algo,
+                                conf->previous_raid_disks,
+                                conf->max_degraded))
+                       continue;
+               dirty_parity_disks++;
+       }
 
        mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
                           - working_disks);
@@ -4935,7 +5039,7 @@ static int run(mddev_t *mddev)
        mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
        mddev->resync_max_sectors = mddev->dev_sectors;
 
-       if (mddev->degraded > 0 &&
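+       /* Devices that hold only parity and are merely un-synced do
+        * not make a dirty, degraded array unsafe to start.
+        */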
+       if (mddev->degraded > dirty_parity_disks &&
            mddev->recovery_cp != MaxSector) {
                if (mddev->ok_start_degraded)
                        printk(KERN_WARNING
@@ -4987,7 +5091,9 @@ static int run(mddev_t *mddev)
        }
 
        /* Ok, everything is just fine now */
-       if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
+       if (mddev->to_remove == &raid5_attrs_group)
+               mddev->to_remove = NULL;
+       else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
                printk(KERN_WARNING
                       "raid5: failed to create sysfs attributes for %s\n",
                       mdname(mddev));
@@ -5027,15 +5133,15 @@ abort:
 
 static int stop(mddev_t *mddev)
 {
-       raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+       raid5_conf_t *conf = mddev->private;
 
        md_unregister_thread(mddev->thread);
        mddev->thread = NULL;
        mddev->queue->backing_dev_info.congested_fn = NULL;
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
-       sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
        free_conf(conf);
        mddev->private = NULL;
+       mddev->to_remove = &raid5_attrs_group;
        return 0;
 }
 
@@ -5076,7 +5182,7 @@ static void printall(struct seq_file *seq, raid5_conf_t *conf)
 
 static void status(struct seq_file *seq, mddev_t *mddev)
 {
-       raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+       raid5_conf_t *conf = mddev->private;
        int i;
 
        seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
@@ -5361,9 +5467,11 @@ static int raid5_start_reshape(mddev_t *mddev)
                    !test_bit(Faulty, &rdev->flags)) {
                        if (raid5_add_disk(mddev, rdev) == 0) {
                                char nm[20];
-                               set_bit(In_sync, &rdev->flags);
-                               added_devices++;
-                               rdev->recovery_offset = 0;
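+                               /* Slots beyond the old disk set are
+                                * genuinely new and start In_sync; a
+                                * device filling a pre-existing slot
+                                * must be recovered from sector 0.
+                                */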
+                               if (rdev->raid_disk >= conf->previous_raid_disks) {
+                                       set_bit(In_sync, &rdev->flags);
+                                       added_devices++;
+                               } else
+                                       rdev->recovery_offset = 0;
                                sprintf(nm, "rd%d", rdev->raid_disk);
                                if (sysfs_create_link(&mddev->kobj,
                                                      &rdev->kobj, nm))
@@ -5375,9 +5483,12 @@ static int raid5_start_reshape(mddev_t *mddev)
                                break;
                }
 
+       /* When a reshape changes the number of devices, ->degraded
+        * is measured against the larger of the pre and post number
+        * of devices.
+        */
        if (mddev->delta_disks > 0) {
                spin_lock_irqsave(&conf->device_lock, flags);
-               mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
+               mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
                        - added_devices;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
@@ -5509,6 +5620,21 @@ static void raid5_quiesce(mddev_t *mddev, int state)
 }
 
 
+static void *raid5_takeover_raid0(mddev_t *mddev)
+{
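+       /* A raid0 with N disks becomes a degraded (N+1)-disk raid5:
+        * ALGORITHM_PARITY_N keeps all data on the first N disks, so
+        * the existing layout is unchanged and only the (missing)
+        * parity disk needs to be added and rebuilt.
+        */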
+       mddev->new_level = 5;
+       mddev->new_layout = ALGORITHM_PARITY_N;
+       mddev->new_chunk_sectors = mddev->chunk_sectors;
+       mddev->raid_disks += 1;
+       mddev->delta_disks = 1;
+       /* make sure it will not be marked as dirty */
+       mddev->recovery_cp = MaxSector;
+
+       return setup_conf(mddev);
+}
+
+
 static void *raid5_takeover_raid1(mddev_t *mddev)
 {
        int chunksect;
@@ -5638,6 +5764,16 @@ static void *raid5_takeover(mddev_t *mddev)
         *  raid4 - trivial - just use a raid4 layout.
         *  raid6 - Providing it is a *_6 layout
         */
+       if (mddev->level == 0) {
+               /* raid0 takeover only supports a single zone, i.e. all
+                * member devices the same size; multiple zones cannot
+                * map onto a uniform raid5 stripe layout.
+                */
+               struct raid0_private_data *raid0_priv = mddev->private;
+               if (raid0_priv->nr_strip_zones > 1) {
+                       printk(KERN_ERR "md: cannot takeover raid 0 with more than one zone.\n");
+                       return ERR_PTR(-EINVAL);
+               }
+               return raid5_takeover_raid0(mddev);
+       }
 
        if (mddev->level == 1)
                return raid5_takeover_raid1(mddev);
@@ -5787,6 +5923,7 @@ static void raid5_exit(void)
 module_init(raid5_init);
 module_exit(raid5_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
 MODULE_ALIAS("md-personality-4"); /* RAID5 */
 MODULE_ALIAS("md-raid5");
 MODULE_ALIAS("md-raid4");