md: kill STRIPE_OP_IO flag
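The hunks below drop the STRIPE_OP_IO bit from sh->ops.pending: the stripe handlers only mark devices with R5_Wantread/R5_Wantwrite (and R5_LOCKED/s.locked), and handle_stripe5() now calls ops_run_io() unconditionally after raid5_run_ops() instead of dispatching I/O as an acked stripe operation. A condensed sketch of the resulting submission path follows; it is abbreviated from drivers/md/raid5.c of this tree, with locking, bio construction, rdev lookup and failure handling elided, so treat the bodies as illustrative rather than the literal driver code.

static void ops_run_io(struct stripe_head *sh)
{
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;

		/* The per-device want flags, not a STRIPE_OP_IO op,
		 * now decide whether this member disk gets a bio. */
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		/* ... if the member rdev is present: set STRIPE_IO_STARTED,
		 * fill in a bio for sh->sector and submit it via
		 * generic_make_request() ... */
	}
}

static void handle_stripe5(struct stripe_head *sh)
{
	struct bio *return_bi = NULL;
	unsigned long pending = 0;

	/* ... stripe analysis: the handle_* helpers now only set
	 * R5_Wantread/R5_Wantwrite (plus R5_LOCKED and s.locked) instead
	 * of queueing STRIPE_OP_IO in sh->ops.pending ... */

	if (pending)
		raid5_run_ops(sh, pending);	/* compute/xor/check work */

	ops_run_io(sh);		/* unconditional; a no-op when no device
				 * carries a want flag */
	return_io(return_bi);
}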
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 475fba4..cac9708 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -115,9 +115,7 @@ static void return_io(struct bio *return_bi)
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
                bi->bi_size = 0;
-               bi->bi_end_io(bi,
-                             test_bit(BIO_UPTODATE, &bi->bi_flags)
-                               ? 0 : -EIO);
+               bio_endio(bi, 0);
                bi = return_bi;
        }
 }
@@ -375,8 +373,6 @@ static unsigned long get_stripe_work(struct stripe_head *sh)
        test_and_ack_op(STRIPE_OP_BIODRAIN, pending);
        test_and_ack_op(STRIPE_OP_POSTXOR, pending);
        test_and_ack_op(STRIPE_OP_CHECK, pending);
-       if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending))
-               ack++;
 
        sh->ops.count -= ack;
        if (unlikely(sh->ops.count < 0)) {
@@ -401,7 +397,6 @@ static void ops_run_io(struct stripe_head *sh)
 
        might_sleep();
 
-       set_bit(STRIPE_IO_STARTED, &sh->state);
        for (i = disks; i--; ) {
                int rw;
                struct bio *bi;
@@ -435,6 +430,8 @@ static void ops_run_io(struct stripe_head *sh)
                                test_bit(STRIPE_EXPAND_READY, &sh->state))
                                md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 
+                       set_bit(STRIPE_IO_STARTED, &sh->state);
+
                        bi->bi_bdev = rdev->bdev;
                        pr_debug("%s: for %llu schedule op %ld on disc %d\n",
                                __func__, (unsigned long long)sh->sector,
@@ -839,15 +836,10 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 static void ops_complete_check(void *stripe_head_ref)
 {
        struct stripe_head *sh = stripe_head_ref;
-       int pd_idx = sh->pd_idx;
 
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
-       if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
-               sh->ops.zero_sum_result == 0)
-               set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
-
        set_bit(STRIPE_OP_CHECK, &sh->ops.complete);
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
@@ -875,11 +867,6 @@ static void ops_run_check(struct stripe_head *sh)
        tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
                &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
 
-       if (tx)
-               set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
-       else
-               clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
-
        atomic_inc(&sh->count);
        tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
                ops_complete_check, sh);
@@ -912,9 +899,6 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
        if (test_bit(STRIPE_OP_CHECK, &pending))
                ops_run_check(sh);
 
-       if (test_bit(STRIPE_OP_IO, &pending))
-               ops_run_io(sh);
-
        if (overlap_clear)
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
@@ -2025,8 +2009,6 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
                         */
                        set_bit(R5_LOCKED, &dev->flags);
                        set_bit(R5_Wantread, &dev->flags);
-                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
-                               sh->ops.count++;
                        s->locked++;
                        pr_debug("Reading block %d (sync=%d)\n", disk_idx,
                                s->syncing);
@@ -2220,9 +2202,6 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
                                                "%d for r-m-w\n", i);
                                        set_bit(R5_LOCKED, &dev->flags);
                                        set_bit(R5_Wantread, &dev->flags);
-                                       if (!test_and_set_bit(
-                                               STRIPE_OP_IO, &sh->ops.pending))
-                                               sh->ops.count++;
                                        s->locked++;
                                } else {
                                        set_bit(STRIPE_DELAYED, &sh->state);
@@ -2246,9 +2225,6 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
                                                "%d for Reconstruct\n", i);
                                        set_bit(R5_LOCKED, &dev->flags);
                                        set_bit(R5_Wantread, &dev->flags);
-                                       if (!test_and_set_bit(
-                                               STRIPE_OP_IO, &sh->ops.pending))
-                                               sh->ops.count++;
                                        s->locked++;
                                } else {
                                        set_bit(STRIPE_DELAYED, &sh->state);
@@ -2456,8 +2432,6 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
 
                set_bit(R5_LOCKED, &dev->flags);
                set_bit(R5_Wantwrite, &dev->flags);
-               if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
-                       sh->ops.count++;
 
                clear_bit(STRIPE_DEGRADED, &sh->state);
                s->locked++;
@@ -2813,9 +2787,6 @@ static void handle_stripe5(struct stripe_head *sh)
                                (i == sh->pd_idx || dev->written)) {
                                pr_debug("Writing block %d\n", i);
                                set_bit(R5_Wantwrite, &dev->flags);
-                               if (!test_and_set_bit(
-                                   STRIPE_OP_IO, &sh->ops.pending))
-                                       sh->ops.count++;
                                if (prexor)
                                        continue;
                                if (!test_bit(R5_Insync, &dev->flags) ||
@@ -2869,16 +2840,12 @@ static void handle_stripe5(struct stripe_head *sh)
                dev = &sh->dev[s.failed_num];
                if (!test_bit(R5_ReWrite, &dev->flags)) {
                        set_bit(R5_Wantwrite, &dev->flags);
-                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
-                               sh->ops.count++;
                        set_bit(R5_ReWrite, &dev->flags);
                        set_bit(R5_LOCKED, &dev->flags);
                        s.locked++;
                } else {
                        /* let's read it back */
                        set_bit(R5_Wantread, &dev->flags);
-                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
-                               sh->ops.count++;
                        set_bit(R5_LOCKED, &dev->flags);
                        s.locked++;
                }
@@ -2896,11 +2863,11 @@ static void handle_stripe5(struct stripe_head *sh)
                clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
                clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
 
                for (i = conf->raid_disks; i--; ) {
                        set_bit(R5_Wantwrite, &sh->dev[i].flags);
-                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
-                               sh->ops.count++;
+                       set_bit(R5_LOCKED, &sh->dev[i].flags);
+                       s.locked++;
                }
        }
 
        if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
@@ -2911,6 +2877,7 @@ static void handle_stripe5(struct stripe_head *sh)
                        conf->raid_disks);
                s.locked += handle_write_operations5(sh, 1, 1);
        } else if (s.expanded &&
+                  s.locked == 0 &&
                !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
                clear_bit(STRIPE_EXPAND_READY, &sh->state);
                atomic_dec(&conf->reshape_stripes);
@@ -2935,6 +2902,8 @@ static void handle_stripe5(struct stripe_head *sh)
        if (pending)
                raid5_run_ops(sh, pending);
 
+       ops_run_io(sh);
+
        return_io(return_bi);
 
 }
@@ -3697,9 +3666,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
                if ( rw == WRITE )
                        md_write_end(mddev);
 
-               bi->bi_end_io(bi,
-                             test_bit(BIO_UPTODATE, &bi->bi_flags)
-                               ? 0 : -EIO);
+               bio_endio(bi, 0);
        }
        return 0;
 }
@@ -4002,12 +3969,8 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
        spin_lock_irq(&conf->device_lock);
        remaining = --raid_bio->bi_phys_segments;
        spin_unlock_irq(&conf->device_lock);
-       if (remaining == 0) {
-
-               raid_bio->bi_end_io(raid_bio,
-                             test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
-                               ? 0 : -EIO);
-       }
+       if (remaining == 0)
+               bio_endio(raid_bio, 0);
        if (atomic_dec_and_test(&conf->active_aligned_reads))
                wake_up(&conf->wait_for_stripe);
        return handled;
@@ -4609,35 +4572,41 @@ abort:
 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        raid5_conf_t *conf = mddev->private;
-       int found = 0;
+       int err = -EEXIST;
        int disk;
        struct disk_info *p;
+       int first = 0;
+       int last = conf->raid_disks - 1;
 
        if (mddev->degraded > conf->max_degraded)
                /* no point adding a device */
-               return 0;
+               return -EINVAL;
+
+       if (rdev->raid_disk >= 0)
+               first = last = rdev->raid_disk;
 
        /*
         * find the disk ... but prefer rdev->saved_raid_disk
         * if possible.
         */
        if (rdev->saved_raid_disk >= 0 &&
+           rdev->saved_raid_disk >= first &&
            conf->disks[rdev->saved_raid_disk].rdev == NULL)
                disk = rdev->saved_raid_disk;
        else
-               disk = 0;
-       for ( ; disk < conf->raid_disks; disk++)
+               disk = first;
+       for ( ; disk <= last ; disk++)
                if ((p=conf->disks + disk)->rdev == NULL) {
                        clear_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = disk;
-                       found = 1;
+                       err = 0;
                        if (rdev->saved_raid_disk != disk)
                                conf->fullsync = 1;
                        rcu_assign_pointer(p->rdev, rdev);
                        break;
                }
        print_raid5_conf(conf);
-       return found;
+       return err;
 }
 
 static int raid5_resize(mddev_t *mddev, sector_t sectors)
@@ -4738,7 +4707,7 @@ static int raid5_start_reshape(mddev_t *mddev)
        rdev_for_each(rdev, rtmp, mddev)
                if (rdev->raid_disk < 0 &&
                    !test_bit(Faulty, &rdev->flags)) {
-                       if (raid5_add_disk(mddev, rdev)) {
+                       if (raid5_add_disk(mddev, rdev) == 0) {
                                char nm[20];
                                set_bit(In_sync, &rdev->flags);
                                added_devices++;
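
The final hunks also switch raid5_add_disk() from a 0/1 "found" result to a kernel-style 0/negative-errno result and honour a caller-requested slot via rdev->raid_disk. A hedged caller-side sketch (hot_add_spare() is a hypothetical helper, modelled on the reshape/hot-add usage shown above):

/* Hypothetical caller sketch: raid5_add_disk() now returns 0 on success,
 * -EINVAL when the array is already too degraded to accept a spare, and
 * -EEXIST when no slot in the allowed [first..last] range is free. */
static int hot_add_spare(mddev_t *mddev, mdk_rdev_t *rdev)
{
	int err = raid5_add_disk(mddev, rdev);

	if (err)
		return err;	/* propagate the errno unchanged */

	/* as in raid5_start_reshape(): only mark the device In_sync
	 * once the add has actually succeeded (err == 0) */
	set_bit(In_sync, &rdev->flags);
	return 0;
}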