include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 54ef8d7..e3e9a36 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -50,6 +50,7 @@
 #include <linux/async.h>
 #include <linux/seq_file.h>
 #include <linux/cpu.h>
+#include <linux/slab.h>
 #include "md.h"
 #include "raid5.h"
 #include "bitmap.h"
@@ -156,13 +157,16 @@ static inline int raid6_next_disk(int disk, int raid_disks)
 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
                             int *count, int syndrome_disks)
 {
-       int slot;
+       int slot = *count;
 
+       if (sh->ddf_layout)
+               (*count)++;
        if (idx == sh->pd_idx)
                return syndrome_disks;
        if (idx == sh->qd_idx)
                return syndrome_disks + 1;
-       slot = (*count)++;
+       if (!sh->ddf_layout)
+               (*count)++;
        return slot;
 }
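
A standalone sketch of the reworked slot mapping above (hypothetical userspace code, not a kernel API). For the DDF layout the counter now advances on every disk, so data blocks keep their physical slot and the slots under P/Q are simply never filled; for the native md layout the counter advances only on data disks, packing them into slots 0..syndrome_disks-1. set_syndrome_sources() passes syndrome_disks = disks for DDF and disks - 2 otherwise.

#include <stdio.h>

static int idx_to_slot(int idx, int pd_idx, int qd_idx, int ddf_layout,
                       int *count, int syndrome_disks)
{
        int slot = *count;

        if (ddf_layout)
                (*count)++;
        if (idx == pd_idx)
                return syndrome_disks;
        if (idx == qd_idx)
                return syndrome_disks + 1;
        if (!ddf_layout)
                (*count)++;
        return slot;
}

int main(void)
{
        int disks = 6, pd_idx = 1, qd_idx = 2;
        int count, i;

        /* md layout: syndrome_disks = disks - 2; data packs into 0..3 */
        for (count = 0, i = 0; i < disks; i++)
                printf("md  idx %d -> slot %d\n",
                       i, idx_to_slot(i, pd_idx, qd_idx, 0, &count, disks - 2));
        /* ddf layout: syndrome_disks = disks; slots 1 and 2 stay empty */
        for (count = 0, i = 0; i < disks; i++)
                printf("ddf idx %d -> slot %d\n",
                       i, idx_to_slot(i, pd_idx, qd_idx, 1, &count, disks));
        return 0;
}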
 
@@ -717,7 +721,7 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
        int i;
 
        for (i = 0; i < disks; i++)
-               srcs[i] = (void *)raid6_empty_zero_page;
+               srcs[i] = NULL;
 
        count = 0;
        i = d0_idx;
@@ -727,9 +731,8 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
                srcs[slot] = sh->dev[i].page;
                i = raid6_next_disk(i, disks);
        } while (i != d0_idx);
-       BUG_ON(count != syndrome_disks);
 
-       return count;
+       return syndrome_disks;
 }
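
With the change above, unused source slots stay NULL and set_syndrome_sources() always reports syndrome_disks sources; the async_pq layer substitutes its zero page for NULL entries, which is what makes the empty DDF slots harmless. A minimal userspace model of that convention, using a plain XOR parity as a stand-in for the real P/Q math:

#include <string.h>

#define STRIPE 4096

static unsigned char zero_page[STRIPE];

static void xor_parity(unsigned char *p, unsigned char **srcs, int n)
{
        int i, j;

        memset(p, 0, STRIPE);
        for (i = 0; i < n; i++) {
                /* a NULL slot stands for a block of zeroes */
                const unsigned char *src = srcs[i] ? srcs[i] : zero_page;

                for (j = 0; j < STRIPE; j++)
                        p[j] ^= src[j];
        }
}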
 
 static struct dma_async_tx_descriptor *
@@ -810,11 +813,11 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
        BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
 
-       /* we need to open-code set_syndrome_sources to handle to the
+       /* we need to open-code set_syndrome_sources to handle the
         * slot number conversion for 'faila' and 'failb'
         */
        for (i = 0; i < disks ; i++)
-               blocks[i] = (void *)raid6_empty_zero_page;
+               blocks[i] = NULL;
        count = 0;
        i = d0_idx;
        do {
@@ -828,7 +831,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
                        failb = slot;
                i = raid6_next_disk(i, disks);
        } while (i != d0_idx);
-       BUG_ON(count != syndrome_disks);
 
        BUG_ON(faila == failb);
        if (failb < faila)
@@ -845,7 +847,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
                        init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                                          ops_complete_compute, sh,
                                          to_addr_conv(sh, percpu));
-                       return async_gen_syndrome(blocks, 0, count+2,
+                       return async_gen_syndrome(blocks, 0, syndrome_disks+2,
                                                  STRIPE_SIZE, &submit);
                } else {
                        struct page *dest;
@@ -879,18 +881,21 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
                        return async_gen_syndrome(blocks, 0, count+2,
                                                  STRIPE_SIZE, &submit);
                }
-       }
-
-       init_async_submit(&submit, ASYNC_TX_FENCE, NULL, ops_complete_compute,
-                         sh, to_addr_conv(sh, percpu));
-       if (failb == syndrome_disks) {
-               /* We're missing D+P. */
-               return async_raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE,
-                                              faila, blocks, &submit);
        } else {
-               /* We're missing D+D. */
-               return async_raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE,
-                                              faila, failb, blocks, &submit);
+               init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
+                                 ops_complete_compute, sh,
+                                 to_addr_conv(sh, percpu));
+               if (failb == syndrome_disks) {
+                       /* We're missing D+P. */
+                       return async_raid6_datap_recov(syndrome_disks+2,
+                                                      STRIPE_SIZE, faila,
+                                                      blocks, &submit);
+               } else {
+                       /* We're missing D+D. */
+                       return async_raid6_2data_recov(syndrome_disks+2,
+                                                      STRIPE_SIZE, faila, failb,
+                                                      blocks, &submit);
+               }
        }
 }
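
In the restructured branch above, faila and failb are slot numbers with failb the larger after the swap, so failb == syndrome_disks + 1 means Q itself is one of the failures (handled first), failb == syndrome_disks means P is, and anything smaller means two data blocks. A compact model of that dispatch (hypothetical names, not kernel API):

enum recov { RECOV_DATA_DATA, RECOV_DATA_P, RECOV_Q_PATH };

/* assumes faila < failb, as guaranteed by the swap above */
static enum recov pick_recovery(int faila, int failb, int syndrome_disks)
{
        if (failb == syndrome_disks + 1)
                return RECOV_Q_PATH;    /* Q dead: regenerate via gen_syndrome */
        if (failb == syndrome_disks)
                return RECOV_DATA_P;    /* missing D+P: datap_recov */
        return RECOV_DATA_DATA;         /* missing D+D: 2data_recov */
}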
 
@@ -1136,7 +1141,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
 }
 
-static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 {
        int overlap_clear = 0, i, disks = sh->disks;
        struct dma_async_tx_descriptor *tx = NULL;
@@ -1201,22 +1206,55 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
        put_cpu();
 }
 
+#ifdef CONFIG_MULTICORE_RAID456
+static void async_run_ops(void *param, async_cookie_t cookie)
+{
+       struct stripe_head *sh = param;
+       unsigned long ops_request = sh->ops.request;
+
+       clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
+       wake_up(&sh->ops.wait_for_ops);
+
+       __raid_run_ops(sh, ops_request);
+       release_stripe(sh);
+}
+
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+{
+       /* since handle_stripe can be called outside of raid5d context
+        * we need to ensure sh->ops.request is de-staged before another
+        * request arrives
+        */
+       wait_event(sh->ops.wait_for_ops,
+                  !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
+       sh->ops.request = ops_request;
+
+       atomic_inc(&sh->count);
+       async_schedule(async_run_ops, sh);
+}
+#else
+#define raid_run_ops __raid_run_ops
+#endif
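
A rough C11 model of the STRIPE_OPS_REQ_PENDING handshake introduced above (illustrative only: the kernel pairs test_and_set_bit_lock/clear_bit_unlock with a waitqueue instead of spinning). The point is that a submitter cannot overwrite sh->ops.request until the async worker has de-staged the previous one:

#include <stdatomic.h>

static atomic_flag ops_req_pending = ATOMIC_FLAG_INIT;
static unsigned long pending_request;

static void submit_request(unsigned long ops_request)
{
        /* claim the single request slot; kernel: wait_event() +
         * test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING) */
        while (atomic_flag_test_and_set(&ops_req_pending))
                ;
        pending_request = ops_request;
        /* kernel: atomic_inc(&sh->count); async_schedule(...) */
}

static unsigned long destage_request(void)
{
        unsigned long ops_request = pending_request;

        /* release before running the ops, so the next submitter can
         * queue; kernel: clear_bit_unlock() + wake_up() */
        atomic_flag_clear(&ops_req_pending);
        return ops_request;
}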
+
 static int grow_one_stripe(raid5_conf_t *conf)
 {
        struct stripe_head *sh;
+       int disks = max(conf->raid_disks, conf->previous_raid_disks);
        sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
        if (!sh)
                return 0;
-       memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
+       memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev));
        sh->raid_conf = conf;
        spin_lock_init(&sh->lock);
+       #ifdef CONFIG_MULTICORE_RAID456
+       init_waitqueue_head(&sh->ops.wait_for_ops);
+       #endif
 
-       if (grow_buffers(sh, conf->raid_disks)) {
-               shrink_buffers(sh, conf->raid_disks);
+       if (grow_buffers(sh, disks)) {
+               shrink_buffers(sh, disks);
                kmem_cache_free(conf->slab_cache, sh);
                return 0;
        }
-       sh->disks = conf->raid_disks;
        /* we just created an active stripe so... */
        atomic_set(&sh->count, 1);
        atomic_inc(&conf->active_stripes);
@@ -1228,7 +1266,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
 static int grow_stripes(raid5_conf_t *conf, int num)
 {
        struct kmem_cache *sc;
-       int devs = conf->raid_disks;
+       int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
        sprintf(conf->cache_name[0],
                "raid%d-%s", conf->level, mdname(conf->mddev));
@@ -1326,6 +1364,9 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 
                nsh->raid_conf = conf;
                spin_lock_init(&nsh->lock);
+               #ifdef CONFIG_MULTICORE_RAID456
+               init_waitqueue_head(&nsh->ops.wait_for_ops);
+               #endif
 
                list_add(&nsh->lru, &newstripes);
        }
@@ -1617,8 +1658,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
        sector_t new_sector;
        int algorithm = previous ? conf->prev_algo
                                 : conf->algorithm;
-       int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
-                                        : (conf->chunk_size >> 9);
+       int sectors_per_chunk = previous ? conf->prev_chunk_sectors
+                                        : conf->chunk_sectors;
        int raid_disks = previous ? conf->previous_raid_disks
                                  : conf->raid_disks;
        int data_disks = raid_disks - conf->max_degraded;
@@ -1823,8 +1864,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
        int raid_disks = sh->disks;
        int data_disks = raid_disks - conf->max_degraded;
        sector_t new_sector = sh->sector, check;
-       int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
-                                        : (conf->chunk_size >> 9);
+       int sectors_per_chunk = previous ? conf->prev_chunk_sectors
+                                        : conf->chunk_sectors;
        int algorithm = previous ? conf->prev_algo
                                 : conf->algorithm;
        sector_t stripe;
@@ -1896,10 +1937,15 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
                case ALGORITHM_PARITY_N:
                        break;
                case ALGORITHM_ROTATING_N_CONTINUE:
+                       /* Like left_symmetric, but P is before Q */
                        if (sh->pd_idx == 0)
                                i--;    /* P D D D Q */
-                       else if (i > sh->pd_idx)
-                               i -= 2; /* D D Q P D */
+                       else {
+                               /* D D Q P D */
+                               if (i < sh->pd_idx)
+                                       i += raid_disks;
+                               i -= (sh->pd_idx + 1);
+                       }
                        break;
                case ALGORITHM_LEFT_ASYMMETRIC_6:
                case ALGORITHM_RIGHT_ASYMMETRIC_6:
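
A worked example of the corrected ALGORITHM_ROTATING_N_CONTINUE branch (standalone sketch, not kernel code). With 5 disks and P at index 3 the stripe reads D D Q P D and data follows P cyclically, so disk 4 holds data block 0, disk 0 block 1, disk 1 block 2; the old "i -= 2" branch gave 2, 0, 1 instead:

#include <stdio.h>

static int data_index(int i, int pd_idx, int raid_disks)
{
        if (pd_idx == 0)
                return i - 1;           /* P D D D Q */
        if (i < pd_idx)                 /* D D Q P D: wrap around */
                i += raid_disks;
        return i - (pd_idx + 1);
}

int main(void)
{
        /* 5 disks, P at 3, Q at 2: data disks in order are 4, 0, 1 */
        printf("%d %d %d\n", data_index(4, 3, 5),
               data_index(0, 3, 5), data_index(1, 3, 5));
        return 0;
}
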
@@ -2098,8 +2144,7 @@ static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
                            struct stripe_head *sh)
 {
        int sectors_per_chunk =
-               previous ? (conf->prev_chunk >> 9)
-                        : (conf->chunk_size >> 9);
+               previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
        int dd_idx;
        int chunk_offset = sector_div(stripe, sectors_per_chunk);
        int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
@@ -2894,7 +2939,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
  *
  */
 
-static bool handle_stripe5(struct stripe_head *sh)
+static void handle_stripe5(struct stripe_head *sh)
 {
        raid5_conf_t *conf = sh->raid_conf;
        int disks = sh->disks, i;
@@ -2903,6 +2948,7 @@ static bool handle_stripe5(struct stripe_head *sh)
        struct r5dev *dev;
        mdk_rdev_t *blocked_rdev = NULL;
        int prexor;
+       int dec_preread_active = 0;
 
        memset(&s, 0, sizeof(s));
        pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
@@ -2922,7 +2968,8 @@ static bool handle_stripe5(struct stripe_head *sh)
        rcu_read_lock();
        for (i=disks; i--; ) {
                mdk_rdev_t *rdev;
-               struct r5dev *dev = &sh->dev[i];
+
+               dev = &sh->dev[i];
                clear_bit(R5_Insync, &dev->flags);
 
                pr_debug("check %d: state 0x%lx toread %p read %p write %p "
@@ -3051,12 +3098,8 @@ static bool handle_stripe5(struct stripe_head *sh)
                                        set_bit(STRIPE_INSYNC, &sh->state);
                        }
                }
-               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-                       atomic_dec(&conf->preread_active_stripes);
-                       if (atomic_read(&conf->preread_active_stripes) <
-                               IO_THRESHOLD)
-                               md_wakeup_thread(conf->mddev->thread);
-               }
+               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+                       dec_preread_active = 1;
        }
 
        /* Now to consider new write requests and what else, if anything
@@ -3163,12 +3206,20 @@ static bool handle_stripe5(struct stripe_head *sh)
 
        ops_run_io(sh, &s);
 
+       if (dec_preread_active) {
+               /* We delay this until after ops_run_io so that if make_request
+                * is waiting on a barrier, it won't continue until the writes
+                * have actually been submitted.
+                */
+               atomic_dec(&conf->preread_active_stripes);
+               if (atomic_read(&conf->preread_active_stripes) <
+                   IO_THRESHOLD)
+                       md_wakeup_thread(conf->mddev->thread);
+       }
        return_io(return_bi);
-
-       return blocked_rdev == NULL;
 }
 
-static bool handle_stripe6(struct stripe_head *sh)
+static void handle_stripe6(struct stripe_head *sh)
 {
        raid5_conf_t *conf = sh->raid_conf;
        int disks = sh->disks;
@@ -3178,6 +3229,7 @@ static bool handle_stripe6(struct stripe_head *sh)
        struct r6_state r6s;
        struct r5dev *dev, *pdev, *qdev;
        mdk_rdev_t *blocked_rdev = NULL;
+       int dec_preread_active = 0;
 
        pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
                "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
@@ -3215,8 +3267,10 @@ static bool handle_stripe6(struct stripe_head *sh)
                /* now count some things */
                if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
                if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
-               if (test_bit(R5_Wantcompute, &dev->flags))
-                       BUG_ON(++s.compute > 2);
+               if (test_bit(R5_Wantcompute, &dev->flags)) {
+                       s.compute++;
+                       BUG_ON(s.compute > 2);
+               }
 
                if (test_bit(R5_Wantfill, &dev->flags)) {
                        s.to_fill++;
@@ -3313,7 +3367,6 @@ static bool handle_stripe6(struct stripe_head *sh)
         * completed
         */
        if (sh->reconstruct_state == reconstruct_state_drain_result) {
-               int qd_idx = sh->qd_idx;
 
                sh->reconstruct_state = reconstruct_state_idle;
                /* All the 'written' buffers and the parity blocks are ready to
@@ -3335,12 +3388,8 @@ static bool handle_stripe6(struct stripe_head *sh)
                                        set_bit(STRIPE_INSYNC, &sh->state);
                        }
                }
-               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-                       atomic_dec(&conf->preread_active_stripes);
-                       if (atomic_read(&conf->preread_active_stripes) <
-                               IO_THRESHOLD)
-                               md_wakeup_thread(conf->mddev->thread);
-               }
+               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+                       dec_preread_active = 1;
        }
 
        /* Now to consider new write requests and what else, if anything
@@ -3449,18 +3498,27 @@ static bool handle_stripe6(struct stripe_head *sh)
 
        ops_run_io(sh, &s);
 
-       return_io(return_bi);
 
-       return blocked_rdev == NULL;
+       if (dec_preread_active) {
+               /* We delay this until after ops_run_io so that if make_request
+                * is waiting on a barrier, it won't continue until the writes
+                * have actually been submitted.
+                */
+               atomic_dec(&conf->preread_active_stripes);
+               if (atomic_read(&conf->preread_active_stripes) <
+                   IO_THRESHOLD)
+                       md_wakeup_thread(conf->mddev->thread);
+       }
+
+       return_io(return_bi);
 }
 
-/* returns true if the stripe was handled */
-static bool handle_stripe(struct stripe_head *sh)
+static void handle_stripe(struct stripe_head *sh)
 {
        if (sh->raid_conf->level == 6)
-               return handle_stripe6(sh);
+               handle_stripe6(sh);
        else
-               return handle_stripe5(sh);
+               handle_stripe5(sh);
 }
 
 static void raid5_activate_delayed(raid5_conf_t *conf)
@@ -3496,11 +3554,12 @@ static void activate_bit_delay(raid5_conf_t *conf)
 
 static void unplug_slaves(mddev_t *mddev)
 {
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
        int i;
+       int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
        rcu_read_lock();
-       for (i = 0; i < conf->raid_disks; i++) {
+       for (i = 0; i < devs; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
                        struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -3520,7 +3579,7 @@ static void unplug_slaves(mddev_t *mddev)
 static void raid5_unplug_device(struct request_queue *q)
 {
        mddev_t *mddev = q->queuedata;
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
        unsigned long flags;
 
        spin_lock_irqsave(&conf->device_lock, flags);
@@ -3539,11 +3598,14 @@ static void raid5_unplug_device(struct request_queue *q)
 static int raid5_congested(void *data, int bits)
 {
        mddev_t *mddev = data;
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
 
        /* No difference between reads and writes.  Just check
         * how busy the stripe_cache is
         */
+
+       if (mddev_congested(mddev, bits))
+               return 1;
        if (conf->inactive_blocked)
                return 1;
        if (conf->quiesce)
@@ -3564,14 +3626,14 @@ static int raid5_mergeable_bvec(struct request_queue *q,
        mddev_t *mddev = q->queuedata;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
-       unsigned int chunk_sectors = mddev->chunk_size >> 9;
+       unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bvm->bi_size >> 9;
 
        if ((bvm->bi_rw & 1) == WRITE)
                return biovec->bv_len; /* always allow writes to be mergeable */
 
-       if (mddev->new_chunk < mddev->chunk_size)
-               chunk_sectors = mddev->new_chunk >> 9;
+       if (mddev->new_chunk_sectors < mddev->chunk_sectors)
+               chunk_sectors = mddev->new_chunk_sectors;
        max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
        if (max < 0) max = 0;
        if (max <= biovec->bv_len && bio_sectors == 0)
@@ -3584,11 +3646,11 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
 {
        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
-       unsigned int chunk_sectors = mddev->chunk_size >> 9;
+       unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bio->bi_size >> 9;
 
-       if (mddev->new_chunk < mddev->chunk_size)
-               chunk_sectors = mddev->new_chunk >> 9;
+       if (mddev->new_chunk_sectors < mddev->chunk_sectors)
+               chunk_sectors = mddev->new_chunk_sectors;
        return  chunk_sectors >=
                ((sector & (chunk_sectors - 1)) + bio_sectors);
 }
@@ -3652,7 +3714,7 @@ static void raid5_align_endio(struct bio *bi, int error)
        bio_put(bi);
 
        mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
-       conf = mddev_to_conf(mddev);
+       conf = mddev->private;
        rdev = (void*)raid_bi->bi_next;
        raid_bi->bi_next = NULL;
 
@@ -3675,10 +3737,10 @@ static int bio_fits_rdev(struct bio *bi)
 {
        struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-       if ((bi->bi_size>>9) > q->max_sectors)
+       if ((bi->bi_size>>9) > queue_max_sectors(q))
                return 0;
        blk_recount_segments(q, bi);
-       if (bi->bi_phys_segments > q->max_phys_segments)
+       if (bi->bi_phys_segments > queue_max_segments(q))
                return 0;
 
        if (q->merge_bvec_fn)
@@ -3694,8 +3756,8 @@ static int bio_fits_rdev(struct bio *bi)
 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
        mddev_t *mddev = q->queuedata;
-       raid5_conf_t *conf = mddev_to_conf(mddev);
-       unsigned int dd_idx;
+       raid5_conf_t *conf = mddev->private;
+       int dd_idx;
        struct bio* align_bi;
        mdk_rdev_t *rdev;
 
@@ -3811,7 +3873,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
 static int make_request(struct request_queue *q, struct bio * bi)
 {
        mddev_t *mddev = q->queuedata;
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
        int dd_idx;
        sector_t new_sector;
        sector_t logical_sector, last_sector;
@@ -3819,8 +3881,14 @@ static int make_request(struct request_queue *q, struct bio * bi)
        const int rw = bio_data_dir(bi);
        int cpu, remaining;
 
-       if (unlikely(bio_barrier(bi))) {
-               bio_endio(bi, -EOPNOTSUPP);
+       if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
+               /* Drain all pending writes.  We only really need
+                * to ensure they have been submitted, but this is
+                * easier.
+                */
+               mddev->pers->quiesce(mddev, 1);
+               mddev->pers->quiesce(mddev, 0);
+               md_barrier_request(mddev, bi);
                return 0;
        }
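
A toy pthread model of the quiesce-based barrier used above (the kernel implements this with conf->device_lock, wait_for_stripe and the quiesce field; all names here are illustrative). Raising quiesce blocks new stripe activity, the wait drains what is in flight, and dropping it lets I/O resume before the barrier request is handed to md:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int active_stripes;
static int quiesced;

void stripe_begin(void)
{
        pthread_mutex_lock(&lock);
        while (quiesced)
                pthread_cond_wait(&cond, &lock);
        active_stripes++;
        pthread_mutex_unlock(&lock);
}

void stripe_end(void)
{
        pthread_mutex_lock(&lock);
        if (--active_stripes == 0)
                pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
}

/* models the quiesce(mddev, 1); quiesce(mddev, 0) pair */
void barrier_drain(void)
{
        pthread_mutex_lock(&lock);
        quiesced = 1;
        while (active_stripes)
                pthread_cond_wait(&cond, &lock);
        quiesced = 0;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
}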
 
@@ -3908,16 +3976,25 @@ static int make_request(struct request_queue *q, struct bio * bi)
                                spin_unlock_irq(&conf->device_lock);
                                if (must_retry) {
                                        release_stripe(sh);
+                                       schedule();
                                        goto retry;
                                }
                        }
-                       /* FIXME what if we get a false positive because these
-                        * are being updated.
-                        */
-                       if (logical_sector >= mddev->suspend_lo &&
+
+                       if (bio_data_dir(bi) == WRITE &&
+                           logical_sector >= mddev->suspend_lo &&
                            logical_sector < mddev->suspend_hi) {
                                release_stripe(sh);
-                               schedule();
+                               /* As the suspend_* range is controlled by
+                                * userspace, we want an interruptible
+                                * wait.
+                                */
+                               flush_signals(current);
+                               prepare_to_wait(&conf->wait_for_overlap,
+                                               &w, TASK_INTERRUPTIBLE);
+                               if (logical_sector >= mddev->suspend_lo &&
+                                   logical_sector < mddev->suspend_hi)
+                                       schedule();
                                goto retry;
                        }
 
@@ -3935,6 +4012,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
                        finish_wait(&conf->wait_for_overlap, &w);
                        set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
+                       if (mddev->barrier && 
+                           !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+                               atomic_inc(&conf->preread_active_stripes);
                        release_stripe(sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
@@ -3954,6 +4034,14 @@ static int make_request(struct request_queue *q, struct bio * bi)
 
                bio_endio(bi, 0);
        }
+
+       if (mddev->barrier) {
+               /* We need to wait for the stripes to all be handled.
+                * So: wait for preread_active_stripes to drop to 0.
+                */
+               wait_event(mddev->thread->wqueue,
+                          atomic_read(&conf->preread_active_stripes) == 0);
+       }
        return 0;
 }
 
@@ -3989,11 +4077,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
                    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
                        sector_nr = raid5_size(mddev, 0, 0)
                                - conf->reshape_progress;
-               } else if (mddev->delta_disks > 0 &&
+               } else if (mddev->delta_disks >= 0 &&
                           conf->reshape_progress > 0)
                        sector_nr = conf->reshape_progress;
                sector_div(sector_nr, new_data_disks);
                if (sector_nr) {
+                       mddev->curr_resync_completed = sector_nr;
+                       sysfs_notify(&mddev->kobj, NULL, "sync_completed");
                        *skipped = 1;
                        return sector_nr;
                }
@@ -4003,10 +4093,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
         * If old and new chunk sizes differ, we need to process the
         * largest of these
         */
-       if (mddev->new_chunk > mddev->chunk_size)
-               reshape_sectors = mddev->new_chunk / 512;
+       if (mddev->new_chunk_sectors > mddev->chunk_sectors)
+               reshape_sectors = mddev->new_chunk_sectors;
        else
-               reshape_sectors = mddev->chunk_size / 512;
+               reshape_sectors = mddev->chunk_sectors;
 
        /* we update the metadata when there is more than 3Meg
         * in the block range (that is rather arbitrary, should
@@ -4084,7 +4174,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
        INIT_LIST_HEAD(&stripes);
        for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
                int j;
-               int skipped = 0;
+               int skipped_disk = 0;
                sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
                set_bit(STRIPE_EXPANDING, &sh->state);
                atomic_inc(&conf->reshape_stripes);
@@ -4100,14 +4190,14 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
                                continue;
                        s = compute_blocknr(sh, j, 0);
                        if (s < raid5_size(mddev, 0, 0)) {
-                               skipped = 1;
+                               skipped_disk = 1;
                                continue;
                        }
                        memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
                        set_bit(R5_Expanded, &sh->dev[j].flags);
                        set_bit(R5_UPTODATE, &sh->dev[j].flags);
                }
-               if (!skipped) {
+               if (!skipped_disk) {
                        set_bit(STRIPE_EXPAND_READY, &sh->state);
                        set_bit(STRIPE_HANDLE, &sh->state);
                }
@@ -4129,7 +4219,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
                                     1, &dd_idx, NULL);
        last_sector =
                raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
-                                           *(new_data_disks) - 1),
+                                           * new_data_disks - 1),
                                     1, &dd_idx, NULL);
        if (last_sector >= mddev->dev_sectors)
                last_sector = mddev->dev_sectors - 1;
@@ -4158,7 +4248,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
                wait_event(conf->wait_for_overlap,
                           atomic_read(&conf->reshape_stripes) == 0);
                mddev->reshape_position = conf->reshape_progress;
-               mddev->curr_resync_completed = mddev->curr_resync;
+               mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
                conf->reshape_checkpoint = jiffies;
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
                md_wakeup_thread(mddev->thread);
@@ -4203,6 +4293,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
                return 0;
        }
 
+       /* Allow raid5_quiesce to complete */
+       wait_event(conf->wait_for_overlap, conf->quiesce != 2);
+
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                return reshape_request(mddev, sector_nr, skipped);
 
@@ -4257,9 +4350,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
        clear_bit(STRIPE_INSYNC, &sh->state);
        spin_unlock(&sh->lock);
 
-       /* wait for any blocked device to be handled */
-       while (unlikely(!handle_stripe(sh)))
-               ;
+       handle_stripe(sh);
        release_stripe(sh);
 
        return STRIPE_SECTORS;
@@ -4329,37 +4420,6 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
        return handled;
 }
 
-#ifdef CONFIG_MULTICORE_RAID456
-static void __process_stripe(void *param, async_cookie_t cookie)
-{
-       struct stripe_head *sh = param;
-
-       handle_stripe(sh);
-       release_stripe(sh);
-}
-
-static void process_stripe(struct stripe_head *sh, struct list_head *domain)
-{
-       async_schedule_domain(__process_stripe, sh, domain);
-}
-
-static void synchronize_stripe_processing(struct list_head *domain)
-{
-       async_synchronize_full_domain(domain);
-}
-#else
-static void process_stripe(struct stripe_head *sh, struct list_head *domain)
-{
-       handle_stripe(sh);
-       release_stripe(sh);
-       cond_resched();
-}
-
-static void synchronize_stripe_processing(struct list_head *domain)
-{
-}
-#endif
-
 
 /*
  * This is our raid5 kernel thread.
@@ -4371,9 +4431,8 @@ static void synchronize_stripe_processing(struct list_head *domain)
 static void raid5d(mddev_t *mddev)
 {
        struct stripe_head *sh;
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
        int handled;
-       LIST_HEAD(raid_domain);
 
        pr_debug("+++ raid5d active\n");
 
@@ -4410,7 +4469,9 @@ static void raid5d(mddev_t *mddev)
                spin_unlock_irq(&conf->device_lock);
                
                handled++;
-               process_stripe(sh, &raid_domain);
+               handle_stripe(sh);
+               release_stripe(sh);
+               cond_resched();
 
                spin_lock_irq(&conf->device_lock);
        }
@@ -4418,7 +4479,6 @@ static void raid5d(mddev_t *mddev)
 
        spin_unlock_irq(&conf->device_lock);
 
-       synchronize_stripe_processing(&raid_domain);
        async_tx_issue_pending_all();
        unplug_slaves(mddev);
 
@@ -4428,7 +4488,7 @@ static void raid5d(mddev_t *mddev)
 static ssize_t
 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
 {
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
        if (conf)
                return sprintf(page, "%d\n", conf->max_nr_stripes);
        else
@@ -4438,7 +4498,7 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
 static ssize_t
 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
 {
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
        unsigned long new;
        int err;
 
@@ -4476,7 +4536,7 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
 static ssize_t
 raid5_show_preread_threshold(mddev_t *mddev, char *page)
 {
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
        if (conf)
                return sprintf(page, "%d\n", conf->bypass_threshold);
        else
@@ -4486,7 +4546,7 @@ raid5_show_preread_threshold(mddev_t *mddev, char *page)
 static ssize_t
 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
 {
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
        unsigned long new;
        if (len >= PAGE_SIZE)
                return -EINVAL;
@@ -4510,7 +4570,7 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
 static ssize_t
 stripe_cache_active_show(mddev_t *mddev, char *page)
 {
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
        if (conf)
                return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
        else
@@ -4534,20 +4594,16 @@ static struct attribute_group raid5_attrs_group = {
 static sector_t
 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 {
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
 
        if (!sectors)
                sectors = mddev->dev_sectors;
-       if (!raid_disks) {
+       if (!raid_disks)
                /* size is defined by the smallest of previous and new size */
-               if (conf->raid_disks < conf->previous_raid_disks)
-                       raid_disks = conf->raid_disks;
-               else
-                       raid_disks = conf->previous_raid_disks;
-       }
+               raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
 
-       sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
-       sectors &= ~((sector_t)mddev->new_chunk/512 - 1);
+       sectors &= ~((sector_t)mddev->chunk_sectors - 1);
+       sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
        return sectors * (raid_disks - conf->max_degraded);
 }
 
@@ -4625,7 +4681,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
 {
        unsigned long cpu;
        struct page *spare_page;
-       struct raid5_percpu *allcpus;
+       struct raid5_percpu __percpu *allcpus;
        void *scribble;
        int err;
 
@@ -4645,7 +4701,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
                        }
                        per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
                }
-               scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL);
+               scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
                if (!scribble) {
                        err = -ENOMEM;
                        break;
@@ -4666,7 +4722,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
 static raid5_conf_t *setup_conf(mddev_t *mddev)
 {
        raid5_conf_t *conf;
-       int raid_disk, memory;
+       int raid_disk, memory, max_disks;
        mdk_rdev_t *rdev;
        struct disk_info *disk;
 
@@ -4691,24 +4747,39 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
                return ERR_PTR(-EINVAL);
        }
 
-       if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) {
+       if (!mddev->new_chunk_sectors ||
+           (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
+           !is_power_of_2(mddev->new_chunk_sectors)) {
                printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
-                       mddev->new_chunk, mdname(mddev));
+                      mddev->new_chunk_sectors << 9, mdname(mddev));
                return ERR_PTR(-EINVAL);
        }
 
        conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
        if (conf == NULL)
                goto abort;
+       spin_lock_init(&conf->device_lock);
+       init_waitqueue_head(&conf->wait_for_stripe);
+       init_waitqueue_head(&conf->wait_for_overlap);
+       INIT_LIST_HEAD(&conf->handle_list);
+       INIT_LIST_HEAD(&conf->hold_list);
+       INIT_LIST_HEAD(&conf->delayed_list);
+       INIT_LIST_HEAD(&conf->bitmap_list);
+       INIT_LIST_HEAD(&conf->inactive_list);
+       atomic_set(&conf->active_stripes, 0);
+       atomic_set(&conf->preread_active_stripes, 0);
+       atomic_set(&conf->active_aligned_reads, 0);
+       conf->bypass_threshold = BYPASS_THRESHOLD;
 
        conf->raid_disks = mddev->raid_disks;
-       conf->scribble_len = scribble_len(conf->raid_disks);
        if (mddev->reshape_position == MaxSector)
                conf->previous_raid_disks = mddev->raid_disks;
        else
                conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
+       max_disks = max(conf->raid_disks, conf->previous_raid_disks);
+       conf->scribble_len = scribble_len(max_disks);
 
-       conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
+       conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
                              GFP_KERNEL);
        if (!conf->disks)
                goto abort;
@@ -4722,24 +4793,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
        if (raid5_alloc_percpu(conf) != 0)
                goto abort;
 
-       spin_lock_init(&conf->device_lock);
-       init_waitqueue_head(&conf->wait_for_stripe);
-       init_waitqueue_head(&conf->wait_for_overlap);
-       INIT_LIST_HEAD(&conf->handle_list);
-       INIT_LIST_HEAD(&conf->hold_list);
-       INIT_LIST_HEAD(&conf->delayed_list);
-       INIT_LIST_HEAD(&conf->bitmap_list);
-       INIT_LIST_HEAD(&conf->inactive_list);
-       atomic_set(&conf->active_stripes, 0);
-       atomic_set(&conf->preread_active_stripes, 0);
-       atomic_set(&conf->active_aligned_reads, 0);
-       conf->bypass_threshold = BYPASS_THRESHOLD;
-
        pr_debug("raid5: run(%s) called.\n", mdname(mddev));
 
        list_for_each_entry(rdev, &mddev->disks, same_set) {
                raid_disk = rdev->raid_disk;
-               if (raid_disk >= conf->raid_disks
+               if (raid_disk >= max_disks
                    || raid_disk < 0)
                        continue;
                disk = conf->disks + raid_disk;
@@ -4756,7 +4814,8 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
                        conf->fullsync = 1;
        }
 
-       conf->chunk_size = mddev->new_chunk;
+       conf->chunk_sectors = mddev->new_chunk_sectors;
+       conf->level = mddev->new_level;
        if (conf->level == 6)
                conf->max_degraded = 2;
        else
@@ -4765,12 +4824,12 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
        conf->max_nr_stripes = NR_STRIPES;
        conf->reshape_progress = mddev->reshape_position;
        if (conf->reshape_progress != MaxSector) {
-               conf->prev_chunk = mddev->chunk_size;
+               conf->prev_chunk_sectors = mddev->chunk_sectors;
                conf->prev_algo = mddev->layout;
        }
 
        memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
-                conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
+                max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
        if (grow_stripes(conf, conf->max_nr_stripes)) {
                printk(KERN_ERR
                        "raid5: couldn't allocate %dkB for buffers\n", memory);
@@ -4779,7 +4838,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
                printk(KERN_INFO "raid5: allocated %dkB for %s\n",
                        memory, mdname(mddev));
 
-       conf->thread = md_register_thread(raid5d, mddev, "%s_raid5");
+       conf->thread = md_register_thread(raid5d, mddev, NULL);
        if (!conf->thread) {
                printk(KERN_ERR
                       "raid5: couldn't allocate thread for %s\n",
@@ -4797,12 +4856,45 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
                return ERR_PTR(-ENOMEM);
 }
 
+
+static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
+{
+       switch (algo) {
+       case ALGORITHM_PARITY_0:
+               if (raid_disk < max_degraded)
+                       return 1;
+               break;
+       case ALGORITHM_PARITY_N:
+               if (raid_disk >= raid_disks - max_degraded)
+                       return 1;
+               break;
+       case ALGORITHM_PARITY_0_6:
+               if (raid_disk == 0 || 
+                   raid_disk == raid_disks - 1)
+                       return 1;
+               break;
+       case ALGORITHM_LEFT_ASYMMETRIC_6:
+       case ALGORITHM_RIGHT_ASYMMETRIC_6:
+       case ALGORITHM_LEFT_SYMMETRIC_6:
+       case ALGORITHM_RIGHT_SYMMETRIC_6:
+               if (raid_disk == raid_disks - 1)
+                       return 1;
+       }
+       return 0;
+}
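
Usage sketch for the only_parity() helper above, assuming it is compiled alongside the helper and that ALGORITHM_PARITY_0 has the value from the raid5 headers (parity lives on the first max_degraded disks). On a 6-disk RAID6 only disks 0 and 1 are parity-only, so a stale copy there can be rebuilt without risking user data:

#include <stdio.h>

enum { ALGORITHM_PARITY_0 = 4 };        /* assumed header value */

extern int only_parity(int raid_disk, int algo, int raid_disks,
                       int max_degraded);

int main(void)
{
        printf("%d %d %d\n",
               only_parity(0, ALGORITHM_PARITY_0, 6, 2),        /* 1 */
               only_parity(1, ALGORITHM_PARITY_0, 6, 2),        /* 1 */
               only_parity(2, ALGORITHM_PARITY_0, 6, 2));       /* 0 */
        return 0;
}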
+
 static int run(mddev_t *mddev)
 {
        raid5_conf_t *conf;
-       int working_disks = 0;
+       int working_disks = 0, chunk_size;
+       int dirty_parity_disks = 0;
        mdk_rdev_t *rdev;
+       sector_t reshape_offset = 0;
 
+       if (mddev->recovery_cp != MaxSector)
+               printk(KERN_NOTICE "raid5: %s is not clean"
+                      " -- starting background reconstruction\n",
+                      mdname(mddev));
        if (mddev->reshape_position != MaxSector) {
                /* Check that we can continue the reshape.
                 * Currently only disks can change, it must
@@ -4825,19 +4917,39 @@ static int run(mddev_t *mddev)
                 * geometry.
                 */
                here_new = mddev->reshape_position;
-               if (sector_div(here_new, (mddev->new_chunk>>9)*
+               if (sector_div(here_new, mddev->new_chunk_sectors *
                               (mddev->raid_disks - max_degraded))) {
                        printk(KERN_ERR "raid5: reshape_position not "
                               "on a stripe boundary\n");
                        return -EINVAL;
                }
+               reshape_offset = here_new * mddev->new_chunk_sectors;
                /* here_new is the stripe we will write to */
                here_old = mddev->reshape_position;
-               sector_div(here_old, (mddev->chunk_size>>9)*
+               sector_div(here_old, mddev->chunk_sectors *
                           (old_disks-max_degraded));
                /* here_old is the first stripe that we might need to read
                 * from */
-               if (here_new >= here_old) {
+               if (mddev->delta_disks == 0) {
+                       /* We cannot be sure it is safe to start an in-place
+                        * reshape.  It is only safe if user-space is monitoring
+                        * and taking constant backups.
+                        * mdadm always starts a situation like this in
+                        * readonly mode so it can take control before
+                        * allowing any writes.  So just check for that.
+                        */
+                       if ((here_new * mddev->new_chunk_sectors != 
+                            here_old * mddev->chunk_sectors) ||
+                           mddev->ro == 0) {
+                               printk(KERN_ERR "raid5: in-place reshape must be started"
+                                      " in read-only mode - aborting\n");
+                               return -EINVAL;
+                       }
+               } else if (mddev->delta_disks < 0
+                   ? (here_new * mddev->new_chunk_sectors <=
+                      here_old * mddev->chunk_sectors)
+                   : (here_new * mddev->new_chunk_sectors >=
+                      here_old * mddev->chunk_sectors)) {
                        /* Reading from the same stripe as writing to - bad */
                        printk(KERN_ERR "raid5: reshape_position too early for "
                               "auto-recovery - aborting.\n");
@@ -4848,7 +4960,7 @@ static int run(mddev_t *mddev)
        } else {
                BUG_ON(mddev->level != mddev->new_level);
                BUG_ON(mddev->layout != mddev->new_layout);
-               BUG_ON(mddev->chunk_size != mddev->new_chunk);
+               BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
                BUG_ON(mddev->delta_disks != 0);
        }
 
@@ -4867,12 +4979,54 @@ static int run(mddev_t *mddev)
        /*
         * 0 for a fully functional array, 1 or 2 for a degraded array.
         */
-       list_for_each_entry(rdev, &mddev->disks, same_set)
-               if (rdev->raid_disk >= 0 &&
-                   test_bit(In_sync, &rdev->flags))
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
+               if (rdev->raid_disk < 0)
+                       continue;
+               if (test_bit(In_sync, &rdev->flags))
                        working_disks++;
+               /* This disc is not fully in-sync.  However if it
+                * just stored parity (beyond the recovery_offset),
+                * then we don't need to be concerned about the
+                * array being dirty.
+                * When reshape goes 'backwards', we never have
+                * partially completed devices, so we only need
+                * to worry about reshape going forwards.
+                */
+               /* Hack because v0.91 doesn't store recovery_offset properly. */
+               if (mddev->major_version == 0 &&
+                   mddev->minor_version > 90)
+                       rdev->recovery_offset = reshape_offset;
+                       
+               printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
+                      rdev->raid_disk, working_disks, conf->prev_algo,
+                      conf->previous_raid_disks, conf->max_degraded,
+                      conf->algorithm, conf->raid_disks, 
+                      only_parity(rdev->raid_disk,
+                                  conf->prev_algo,
+                                  conf->previous_raid_disks,
+                                  conf->max_degraded),
+                      only_parity(rdev->raid_disk,
+                                  conf->algorithm,
+                                  conf->raid_disks,
+                                  conf->max_degraded));
+               if (rdev->recovery_offset < reshape_offset) {
+                       /* We need to check old and new layout */
+                       if (!only_parity(rdev->raid_disk,
+                                        conf->algorithm,
+                                        conf->raid_disks,
+                                        conf->max_degraded))
+                               continue;
+               }
+               if (!only_parity(rdev->raid_disk,
+                                conf->prev_algo,
+                                conf->previous_raid_disks,
+                                conf->max_degraded))
+                       continue;
+               dirty_parity_disks++;
+       }
 
-       mddev->degraded = conf->raid_disks - working_disks;
+       mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
+                          - working_disks);
 
        if (mddev->degraded > conf->max_degraded) {
                printk(KERN_ERR "raid5: not enough operational devices for %s"
@@ -4882,10 +5036,10 @@ static int run(mddev_t *mddev)
        }
 
        /* device size must be a multiple of chunk size */
-       mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
+       mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
        mddev->resync_max_sectors = mddev->dev_sectors;
 
-       if (mddev->degraded > 0 &&
+       if (mddev->degraded > dirty_parity_disks &&
            mddev->recovery_cp != MaxSector) {
                if (mddev->ok_start_degraded)
                        printk(KERN_WARNING
@@ -4922,7 +5076,7 @@ static int run(mddev_t *mddev)
                set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
                set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                mddev->sync_thread = md_register_thread(md_do_sync, mddev,
-                                                       "%s_reshape");
+                                                       "reshape");
        }
 
        /* read-ahead size must cover two whole stripes, which is
@@ -4931,7 +5085,7 @@ static int run(mddev_t *mddev)
        {
                int data_disks = conf->previous_raid_disks - conf->max_degraded;
                int stripe = data_disks *
-                       (mddev->chunk_size / PAGE_SIZE);
+                       ((mddev->chunk_sectors << 9) / PAGE_SIZE);
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }
@@ -4951,6 +5105,14 @@ static int run(mddev_t *mddev)
        md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
        blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
+       chunk_size = mddev->chunk_sectors << 9;
+       blk_queue_io_min(mddev->queue, chunk_size);
+       blk_queue_io_opt(mddev->queue, chunk_size *
+                        (conf->raid_disks - conf->max_degraded));
+
+       list_for_each_entry(rdev, &mddev->disks, same_set)
+               disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                 rdev->data_offset << 9);
 
        return 0;
 abort:
@@ -4975,9 +5137,8 @@ static int stop(mddev_t *mddev)
        mddev->thread = NULL;
        mddev->queue->backing_dev_info.congested_fn = NULL;
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
-       sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
        free_conf(conf);
-       mddev->private = NULL;
+       mddev->private = &raid5_attrs_group;
        return 0;
 }
 
@@ -5021,7 +5182,8 @@ static void status(struct seq_file *seq, mddev_t *mddev)
        raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
        int i;
 
-       seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
+       seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
+               mddev->chunk_sectors / 2, mddev->layout);
        seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
        for (i = 0; i < conf->raid_disks; i++)
                seq_printf (seq, "%s",
@@ -5169,7 +5331,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
         * any io in the removed space completes, but it hardly seems
         * worth it.
         */
-       sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
+       sectors &= ~((sector_t)mddev->chunk_sectors - 1);
        md_set_array_sectors(mddev, raid5_size(mddev, sectors,
                                               mddev->raid_disks));
        if (mddev->array_sectors >
@@ -5177,6 +5339,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
                return -EINVAL;
        set_capacity(mddev->gendisk, mddev->array_sectors);
        mddev->changed = 1;
+       revalidate_disk(mddev->gendisk);
        if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
                mddev->recovery_cp = mddev->dev_sectors;
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -5186,14 +5349,37 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
        return 0;
 }
 
-static int raid5_check_reshape(mddev_t *mddev)
+static int check_stripe_cache(mddev_t *mddev)
+{
+       /* Can only proceed if there are plenty of stripe_heads.
+        * We need a minimum of one full stripe, and for sensible progress
+        * it is best to have about 4 times that.
+        * If we require 4 times, then the default 256 4K stripe_heads will
+        * allow for chunk sizes up to 256K, which is probably OK.
+        * If the chunk size is greater, user-space should request more
+        * stripe_heads first.
+        */
+       raid5_conf_t *conf = mddev->private;
+       if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
+           > conf->max_nr_stripes ||
+           ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
+           > conf->max_nr_stripes) {
+               printk(KERN_WARNING "raid5: reshape: not enough stripes.  Needed %lu\n",
+                      ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
+                       / STRIPE_SIZE)*4);
+               return 0;
+       }
+       return 1;
+}
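
The arithmetic behind check_stripe_cache(), worked for one case: with 4K stripe_heads (STRIPE_SIZE equal to one page) a 512K chunk needs (524288 / 4096) * 4 = 512 cached stripes, double the default 256, so such a reshape is refused until stripe_cache_size is raised:

#include <stdio.h>

#define STRIPE_SIZE 4096        /* one page per device, as in raid5.h */

int main(void)
{
        int chunk_sectors = 1024;       /* 512K chunk */
        int needed = ((chunk_sectors << 9) / STRIPE_SIZE) * 4;

        printf("need %d stripe_heads (default cache is 256)\n", needed);
        return 0;
}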
+
+static int check_reshape(mddev_t *mddev)
 {
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
 
        if (mddev->delta_disks == 0 &&
            mddev->new_layout == mddev->layout &&
-           mddev->new_chunk == mddev->chunk_size)
-               return -EINVAL; /* nothing to do */
+           mddev->new_chunk_sectors == mddev->chunk_sectors)
+               return 0; /* nothing to do */
        if (mddev->bitmap)
                /* Cannot grow a bitmap yet */
                return -EBUSY;
@@ -5212,28 +5398,15 @@ static int raid5_check_reshape(mddev_t *mddev)
                        return -EINVAL;
        }
 
-       /* Can only proceed if there are plenty of stripe_heads.
-        * We need a minimum of one full stripe,, and for sensible progress
-        * it is best to have about 4 times that.
-        * If we require 4 times, then the default 256 4K stripe_heads will
-        * allow for chunk sizes up to 256K, which is probably OK.
-        * If the chunk size is greater, user-space should request more
-        * stripe_heads first.
-        */
-       if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
-           (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
-               printk(KERN_WARNING "raid5: reshape: not enough stripes.  Needed %lu\n",
-                      (max(mddev->chunk_size, mddev->new_chunk)
-                       / STRIPE_SIZE)*4);
+       if (!check_stripe_cache(mddev))
                return -ENOSPC;
-       }
 
        return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
 }
 
 static int raid5_start_reshape(mddev_t *mddev)
 {
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
        mdk_rdev_t *rdev;
        int spares = 0;
        int added_devices = 0;
@@ -5242,6 +5415,9 @@ static int raid5_start_reshape(mddev_t *mddev)
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                return -EBUSY;
 
+       if (!check_stripe_cache(mddev))
+               return -ENOSPC;
+
        list_for_each_entry(rdev, &mddev->disks, same_set)
                if (rdev->raid_disk < 0 &&
                    !test_bit(Faulty, &rdev->flags))
@@ -5268,8 +5444,8 @@ static int raid5_start_reshape(mddev_t *mddev)
        spin_lock_irq(&conf->device_lock);
        conf->previous_raid_disks = conf->raid_disks;
        conf->raid_disks += mddev->delta_disks;
-       conf->prev_chunk = conf->chunk_size;
-       conf->chunk_size = mddev->new_chunk;
+       conf->prev_chunk_sectors = conf->chunk_sectors;
+       conf->chunk_sectors = mddev->new_chunk_sectors;
        conf->prev_algo = conf->algorithm;
        conf->algorithm = mddev->new_layout;
        if (mddev->delta_disks < 0)
@@ -5288,9 +5464,11 @@ static int raid5_start_reshape(mddev_t *mddev)
                    !test_bit(Faulty, &rdev->flags)) {
                        if (raid5_add_disk(mddev, rdev) == 0) {
                                char nm[20];
-                               set_bit(In_sync, &rdev->flags);
-                               added_devices++;
-                               rdev->recovery_offset = 0;
+                               if (rdev->raid_disk >= conf->previous_raid_disks) {
+                                       set_bit(In_sync, &rdev->flags);
+                                       added_devices++;
+                               } else
+                                       rdev->recovery_offset = 0;
                                sprintf(nm, "rd%d", rdev->raid_disk);
                                if (sysfs_create_link(&mddev->kobj,
                                                      &rdev->kobj, nm))
@@ -5302,14 +5480,17 @@ static int raid5_start_reshape(mddev_t *mddev)
                                break;
                }
 
+       /* When a reshape changes the number of devices, ->degraded
+        * is measured against the larger of the pre and post number of
+        * devices. */
        if (mddev->delta_disks > 0) {
                spin_lock_irqsave(&conf->device_lock, flags);
-               mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
+               mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
                        - added_devices;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
        mddev->raid_disks = conf->raid_disks;
-       mddev->reshape_position = 0;
+       mddev->reshape_position = conf->reshape_progress;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
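
Worked example for the ->degraded accounting above (illustrative
numbers only): growing a clean 4-disk array to 6 disks when only one
spare could be added gives

	mddev->degraded += (6 - 4) - 1;		/* == 1 */

so the array reports one degraded member, the still-empty new slot,
until a further device is added and recovered.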
@@ -5317,7 +5498,7 @@ static int raid5_start_reshape(mddev_t *mddev)
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        mddev->sync_thread = md_register_thread(md_do_sync, mddev,
-                                               "%s_reshape");
+                                               "reshape");
        if (!mddev->sync_thread) {
                mddev->recovery = 0;
                spin_lock_irq(&conf->device_lock);
@@ -5351,7 +5532,7 @@ static void end_reshape(raid5_conf_t *conf)
                 */
                {
                        int data_disks = conf->raid_disks - conf->max_degraded;
-                       int stripe = data_disks * (conf->chunk_size
+                       int stripe = data_disks * ((conf->chunk_sectors << 9)
                                                   / PAGE_SIZE);
                        if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                                conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
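
As a units check on the read-ahead sizing above (illustrative numbers):
4 data disks with 128-sector (64K) chunks on 4K pages give

	stripe = 4 * ((128 << 9) / 4096);	/* 64 pages */

so ra_pages is raised to at least 2 * 64 = 128 pages, i.e. 512K, two
full stripes of read-ahead.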
@@ -5364,8 +5545,7 @@ static void end_reshape(raid5_conf_t *conf)
  */
 static void raid5_finish_reshape(mddev_t *mddev)
 {
-       struct block_device *bdev;
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
 
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 
@@ -5373,15 +5553,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
                        md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
                        set_capacity(mddev->gendisk, mddev->array_sectors);
                        mddev->changed = 1;
-
-                       bdev = bdget_disk(mddev->gendisk, 0);
-                       if (bdev) {
-                               mutex_lock(&bdev->bd_inode->i_mutex);
-                               i_size_write(bdev->bd_inode,
-                                            (loff_t)mddev->array_sectors << 9);
-                               mutex_unlock(&bdev->bd_inode->i_mutex);
-                               bdput(bdev);
-                       }
+                       revalidate_disk(mddev->gendisk);
                } else {
                        int d;
                        mddev->degraded = conf->raid_disks;
@@ -5392,11 +5564,18 @@ static void raid5_finish_reshape(mddev_t *mddev)
                                        mddev->degraded--;
                        for (d = conf->raid_disks ;
                             d < conf->raid_disks - mddev->delta_disks;
-                            d++)
-                               raid5_remove_disk(mddev, d);
+                            d++) {
+                               mdk_rdev_t *rdev = conf->disks[d].rdev;
+                               if (rdev && raid5_remove_disk(mddev, d) == 0) {
+                                       char nm[20];
+                                       sprintf(nm, "rd%d", rdev->raid_disk);
+                                       sysfs_remove_link(&mddev->kobj, nm);
+                                       rdev->raid_disk = -1;
+                               }
+                       }
                }
                mddev->layout = conf->algorithm;
-               mddev->chunk_size = conf->chunk_size;
+               mddev->chunk_sectors = conf->chunk_sectors;
                mddev->reshape_position = MaxSector;
                mddev->delta_disks = 0;
        }
@@ -5404,7 +5583,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
 
 static void raid5_quiesce(mddev_t *mddev, int state)
 {
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
 
        switch(state) {
        case 2: /* resume for a suspend */
@@ -5413,12 +5592,18 @@ static void raid5_quiesce(mddev_t *mddev, int state)
 
        case 1: /* stop all writes */
                spin_lock_irq(&conf->device_lock);
-               conf->quiesce = 1;
+               /* '2' tells resync/reshape to pause so that all
+                * active stripes can drain
+                */
+               conf->quiesce = 2;
                wait_event_lock_irq(conf->wait_for_stripe,
                                    atomic_read(&conf->active_stripes) == 0 &&
                                    atomic_read(&conf->active_aligned_reads) == 0,
                                    conf->device_lock, /* nothing */);
+               conf->quiesce = 1;
                spin_unlock_irq(&conf->device_lock);
+               /* allow reshape to continue */
+               wake_up(&conf->wait_for_overlap);
                break;
 
        case 0: /* re-enable writes */
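
The intermediate '2' state only helps if the resync/reshape path
notices it.  A minimal sketch of the expected consumer side, assuming
it parks on wait_for_overlap at a safe point (the matching call site is
outside this hunk):

	/* in the resync/reshape loop: let raid5_quiesce drain stripes */
	wait_event(conf->wait_for_overlap, conf->quiesce != 2);

Once active_stripes and active_aligned_reads reach zero, quiesce drops
back to 1 and the wake_up() above releases the waiter, while normal
writes stay blocked until state 0.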
@@ -5454,7 +5639,7 @@ static void *raid5_takeover_raid1(mddev_t *mddev)
 
        mddev->new_level = 5;
        mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
-       mddev->new_chunk = chunksect << 9;
+       mddev->new_chunk_sectors = chunksect;
 
        return setup_conf(mddev);
 }
@@ -5493,24 +5678,24 @@ static void *raid5_takeover_raid6(mddev_t *mddev)
 }
 
 
-static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
+static int raid5_check_reshape(mddev_t *mddev)
 {
        /* For a 2-drive array, the layout and chunk size can be changed
         * immediately, as no restriping is needed.
         * For larger arrays we record the new value, after validation,
         * to be used by a later reshape pass.
         */
-       raid5_conf_t *conf = mddev_to_conf(mddev);
+       raid5_conf_t *conf = mddev->private;
+       int new_chunk = mddev->new_chunk_sectors;
 
-       if (new_layout >= 0 && !algorithm_valid_raid5(new_layout))
+       if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
                return -EINVAL;
        if (new_chunk > 0) {
-               if (new_chunk & (new_chunk-1))
-                       /* not a power of 2 */
+               if (!is_power_of_2(new_chunk))
                        return -EINVAL;
-               if (new_chunk < PAGE_SIZE)
+               if (new_chunk < (PAGE_SIZE >> 9))
                        return -EINVAL;
-               if (mddev->array_sectors & ((new_chunk>>9)-1))
+               if (mddev->array_sectors & (new_chunk-1))
                        /* not factor of array size */
                        return -EINVAL;
        }
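
Since new_chunk is now measured in 512-byte sectors rather than bytes,
a quick check of the three tests above with illustrative numbers (64K
chunk, 4K pages):

	new_chunk = 128;		/* 65536 / 512 */
	is_power_of_2(128)		/* true */
	128 >= (PAGE_SIZE >> 9)		/* 8 on 4K pages */
	array_sectors & (128 - 1)	/* 0 iff the chunk divides the array */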
@@ -5518,49 +5703,39 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
        /* They look valid */
 
        if (mddev->raid_disks == 2) {
-
-               if (new_layout >= 0) {
-                       conf->algorithm = new_layout;
-                       mddev->layout = mddev->new_layout = new_layout;
+               /* can make the change immediately */
+               if (mddev->new_layout >= 0) {
+                       conf->algorithm = mddev->new_layout;
+                       mddev->layout = mddev->new_layout;
                }
                if (new_chunk > 0) {
-                       conf->chunk_size = new_chunk;
-                       mddev->chunk_size = mddev->new_chunk = new_chunk;
+                       conf->chunk_sectors = new_chunk;
+                       mddev->chunk_sectors = new_chunk;
                }
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
                md_wakeup_thread(mddev->thread);
-       } else {
-               if (new_layout >= 0)
-                       mddev->new_layout = new_layout;
-               if (new_chunk > 0)
-                       mddev->new_chunk = new_chunk;
        }
-       return 0;
+       return check_reshape(mddev);
 }
 
-static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
+static int raid6_check_reshape(mddev_t *mddev)
 {
-       if (new_layout >= 0 && !algorithm_valid_raid6(new_layout))
+       int new_chunk = mddev->new_chunk_sectors;
+
+       if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
                return -EINVAL;
        if (new_chunk > 0) {
-               if (new_chunk & (new_chunk-1))
-                       /* not a power of 2 */
+               if (!is_power_of_2(new_chunk))
                        return -EINVAL;
-               if (new_chunk < PAGE_SIZE)
+               if (new_chunk < (PAGE_SIZE >> 9))
                        return -EINVAL;
-               if (mddev->array_sectors & ((new_chunk>>9)-1))
+               if (mddev->array_sectors & (new_chunk-1))
                        /* not factor of array size */
                        return -EINVAL;
        }
 
        /* They look valid */
-
-       if (new_layout >= 0)
-               mddev->new_layout = new_layout;
-       if (new_chunk > 0)
-               mddev->new_chunk = new_chunk;
-
-       return 0;
+       return check_reshape(mddev);
 }
 
 static void *raid5_takeover(mddev_t *mddev)
@@ -5570,8 +5745,6 @@ static void *raid5_takeover(mddev_t *mddev)
         *  raid1 - if there are two drives.  We need to know the chunk size
         *  raid4 - trivial - just use a raid4 layout.
         *  raid6 - Providing it is a *_6 layout
-        *
-        * For now, just do raid1
         */
 
        if (mddev->level == 1)
@@ -5653,12 +5826,11 @@ static struct mdk_personality raid6_personality =
        .sync_request   = sync_request,
        .resize         = raid5_resize,
        .size           = raid5_size,
-       .check_reshape  = raid5_check_reshape,
+       .check_reshape  = raid6_check_reshape,
        .start_reshape  = raid5_start_reshape,
        .finish_reshape = raid5_finish_reshape,
        .quiesce        = raid5_quiesce,
        .takeover       = raid6_takeover,
-       .reconfig       = raid6_reconfig,
 };
 static struct mdk_personality raid5_personality =
 {
@@ -5681,7 +5853,6 @@ static struct mdk_personality raid5_personality =
        .finish_reshape = raid5_finish_reshape,
        .quiesce        = raid5_quiesce,
        .takeover       = raid5_takeover,
-       .reconfig       = raid5_reconfig,
 };
 
 static struct mdk_personality raid4_personality =
@@ -5724,6 +5895,7 @@ static void raid5_exit(void)
 module_init(raid5_init);
 module_exit(raid5_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
 MODULE_ALIAS("md-personality-4"); /* RAID5 */
 MODULE_ALIAS("md-raid5");
 MODULE_ALIAS("md-raid4");