include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 191f42e..e2766d8 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -18,6 +18,7 @@
  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/blkdev.h>
 #include <linux/seq_file.h>
@@ -68,7 +69,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 
        /* allocate a r10bio with room for raid_disks entries in the bios array */
        r10_bio = kzalloc(size, gfp_flags);
-       if (!r10_bio)
+       if (!r10_bio && conf->mddev)
                unplug_slaves(conf->mddev);
 
        return r10_bio;
@@ -804,7 +805,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
        mdk_rdev_t *blocked_rdev;
 
        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
-               bio_endio(bio, -EOPNOTSUPP);
+               md_barrier_request(mddev, bio);
                return 0;
        }
 
@@ -884,7 +885,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
                        mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
-               read_bio->bi_rw = READ | do_sync;
+               read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
                read_bio->bi_private = r10_bio;
 
                generic_make_request(read_bio);
@@ -952,7 +953,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
                        conf->mirrors[d].rdev->data_offset;
                mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | do_sync;
+               mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
                mbio->bi_private = r10_bio;
 
                atomic_inc(&r10_bio->remaining);
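
Both bi_rw hunks above hinge on the same detail: do_sync is the 0-or-1 result of bio_rw_flagged(bio, BIO_RW_SYNCIO), while BIO_RW_SYNCIO names a bit position. OR-ing the bare boolean, as the old code did, lands in bit 0, the data-direction bit, so a synchronous read could end up flagged as a write instead of as sync. A minimal user-space sketch of the arithmetic; the constant values are assumptions for illustration, not copied from the kernel headers:

#include <stdio.h>

/* Illustrative constants only: bit 0 is assumed to be the data
 * direction (READ = 0, WRITE = 1) and BIO_RW_SYNCIO is assumed to be
 * some higher bit position, as in the 2.6.3x block layer. */
#define READ          0UL
#define WRITE         1UL
#define BIO_RW_SYNCIO 4

int main(void)
{
	unsigned long do_sync = 1;  /* bio_rw_flagged() result: 0 or 1 */

	unsigned long old_rw = READ | do_sync;                    /* sets bit 0 by accident */
	unsigned long new_rw = READ | (do_sync << BIO_RW_SYNCIO); /* sets the sync bit */

	printf("old: 0x%lx (direction bit flipped by mistake)\n", old_rw);
	printf("new: 0x%lx (sync hint set, direction still READ)\n", new_rw);
	return 0;
}
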
@@ -1155,13 +1156,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
-                       /* as we don't honour merge_bvec_fn, we must never risk
-                        * violating it, so limit ->max_sector to one PAGE, as
-                        * a one page request is never in violation.
+                       /* as we don't honour merge_bvec_fn, we must
+                        * never risk violating it, so limit
+                        * ->max_segments to one, lying within a single
+                        * page, as a one page request is never in
+                        * violation.
                         */
-                       if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                           queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                               blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                       if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+                               blk_queue_max_segments(mddev->queue, 1);
+                               blk_queue_segment_boundary(mddev->queue,
+                                                          PAGE_CACHE_SIZE - 1);
+                       }
 
                        p->head_position = 0;
                        rdev->raid_disk = mirror;
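
The replacement limits above make the comment literally true: with max_segments capped at one and the segment boundary mask set to PAGE_CACHE_SIZE - 1, every request a component device sees is a single segment that cannot cross a page boundary, so its merge_bvec_fn can never be violated. A rough user-space sketch of the boundary rule; the helper name and the 4K page size are assumptions, and the mask test only mirrors the block layer's segment-boundary check in spirit:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL          /* assumed 4K pages */

/* A segment stays legal only if its first and last byte fall inside
 * the same (mask + 1)-aligned window. */
static bool crosses_boundary(unsigned long start, unsigned long len,
                             unsigned long mask)
{
	return (start | mask) != ((start + len - 1) | mask);
}

int main(void)
{
	unsigned long mask = PAGE_CACHE_SIZE - 1;

	/* one page, page-aligned: stays inside a page, merge_bvec_fn is safe */
	printf("%d\n", crosses_boundary(0x1000, 4096, mask));  /* prints 0 */
	/* one page of data starting mid-page: would straddle two pages */
	printf("%d\n", crosses_boundary(0x1800, 4096, mask));  /* prints 1 */
	return 0;
}
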
@@ -1432,6 +1437,43 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
 
 
 /*
+ * Used by fix_read_error() to decay the per rdev read_errors.
+ * We halve the read error count for every hour that has elapsed
+ * since the last recorded read error.
+ *
+ */
+static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+       struct timespec cur_time_mon;
+       unsigned long hours_since_last;
+       unsigned int read_errors = atomic_read(&rdev->read_errors);
+
+       ktime_get_ts(&cur_time_mon);
+
+       if (rdev->last_read_error.tv_sec == 0 &&
+           rdev->last_read_error.tv_nsec == 0) {
+               /* first time we've seen a read error */
+               rdev->last_read_error = cur_time_mon;
+               return;
+       }
+
+       hours_since_last = (cur_time_mon.tv_sec -
+                           rdev->last_read_error.tv_sec) / 3600;
+
+       rdev->last_read_error = cur_time_mon;
+
+       /*
+        * if hours_since_last is > the number of bits in read_errors
+        * just set read errors to 0. We do this to avoid
+        * overflowing the shift of read_errors by hours_since_last.
+        */
+       if (hours_since_last >= 8 * sizeof(read_errors))
+               atomic_set(&rdev->read_errors, 0);
+       else
+               atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
+}
+
+/*
  * This is a kernel thread which:
  *
  *     1.      Retries failed read operations on working mirrors.
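
check_decay_read_errors() above is a simple exponential decay: the persistent error count is halved once for every full hour since the last recorded read error, and reset outright once the elapsed hours reach the bit width of the counter, so the right shift can never overflow. For example, 40 accumulated errors become 40 >> 3 = 5 after three quiet hours. A stand-alone model of the same rule follows; it is a sketch, not the kernel function, and leaves out the timestamp bookkeeping and atomics:

#include <stdio.h>

/* Same decay rule as check_decay_read_errors(): halve per elapsed
 * hour, clamp to zero once the shift would cover every bit of the
 * counter. */
static unsigned int decay_read_errors(unsigned int read_errors,
                                      unsigned long hours_since_last)
{
	if (hours_since_last >= 8 * sizeof(read_errors))
		return 0;
	return read_errors >> hours_since_last;
}

int main(void)
{
	printf("%u\n", decay_read_errors(40, 0));   /* 40: same hour, no decay  */
	printf("%u\n", decay_read_errors(40, 3));   /* 5:  halved three times   */
	printf("%u\n", decay_read_errors(40, 40));  /* 0:  shift width exceeded */
	return 0;
}
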
@@ -1444,6 +1486,43 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
        int sect = 0; /* Offset from r10_bio->sector */
        int sectors = r10_bio->sectors;
        mdk_rdev_t*rdev;
+       int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
+
+       rcu_read_lock();
+       {
+               int d = r10_bio->devs[r10_bio->read_slot].devnum;
+               char b[BDEVNAME_SIZE];
+               int cur_read_error_count = 0;
+
+               rdev = rcu_dereference(conf->mirrors[d].rdev);
+               bdevname(rdev->bdev, b);
+
+               if (test_bit(Faulty, &rdev->flags)) {
+                       rcu_read_unlock();
+                       /* drive has already been failed, just ignore any
+                          more fix_read_error() attempts */
+                       return;
+               }
+
+               check_decay_read_errors(mddev, rdev);
+               atomic_inc(&rdev->read_errors);
+               cur_read_error_count = atomic_read(&rdev->read_errors);
+               if (cur_read_error_count > max_read_errors) {
+                       rcu_read_unlock();
+                       printk(KERN_NOTICE
+                              "raid10: %s: Raid device exceeded "
+                              "read_error threshold "
+                              "[cur %d:max %d]\n",
+                              b, cur_read_error_count, max_read_errors);
+                       printk(KERN_NOTICE
+                              "raid10: %s: Failing raid "
+                              "device\n", b);
+                       md_error(mddev, conf->mirrors[d].rdev);
+                       return;
+               }
+       }
+       rcu_read_unlock();
+
        while(sectors) {
                int s = sectors;
                int sl = r10_bio->read_slot;
@@ -1488,6 +1567,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                /* write it back and re-read */
                rcu_read_lock();
                while (sl != r10_bio->read_slot) {
+                       char b[BDEVNAME_SIZE];
                        int d;
                        if (sl==0)
                                sl = conf->copies;
@@ -1503,9 +1583,21 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                                                 r10_bio->devs[sl].addr +
                                                 sect + rdev->data_offset,
                                                 s<<9, conf->tmppage, WRITE)
-                                   == 0)
+                                   == 0) {
                                        /* Well, this device is dead */
+                                       printk(KERN_NOTICE
+                                              "raid10:%s: read correction "
+                                              "write failed"
+                                              " (%d sectors at %llu on %s)\n",
+                                              mdname(mddev), s,
+                                              (unsigned long long)(sect+
+                                              rdev->data_offset),
+                                              bdevname(rdev->bdev, b));
+                                       printk(KERN_NOTICE "raid10:%s: failing "
+                                              "drive\n",
+                                              bdevname(rdev->bdev, b));
                                        md_error(mddev, rdev);
+                               }
                                rdev_dec_pending(rdev, mddev);
                                rcu_read_lock();
                        }
@@ -1526,10 +1618,22 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                                if (sync_page_io(rdev->bdev,
                                                 r10_bio->devs[sl].addr +
                                                 sect + rdev->data_offset,
-                                                s<<9, conf->tmppage, READ) == 0)
+                                                s<<9, conf->tmppage,
+                                                READ) == 0) {
                                        /* Well, this device is dead */
+                                       printk(KERN_NOTICE
+                                              "raid10:%s: unable to read back "
+                                              "corrected sectors"
+                                              " (%d sectors at %llu on %s)\n",
+                                              mdname(mddev), s,
+                                              (unsigned long long)(sect+
+                                                   rdev->data_offset),
+                                              bdevname(rdev->bdev, b));
+                                       printk(KERN_NOTICE "raid10:%s: failing drive\n",
+                                              bdevname(rdev->bdev, b));
+
                                        md_error(mddev, rdev);
-                               else
+                               } else {
                                        printk(KERN_INFO
                                               "raid10:%s: read error corrected"
                                               " (%d sectors at %llu on %s)\n",
@@ -1537,6 +1641,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                                               (unsigned long long)(sect+
                                                    rdev->data_offset),
                                               bdevname(rdev->bdev, b));
+                               }
 
                                rdev_dec_pending(rdev, mddev);
                                rcu_read_lock();
@@ -1625,13 +1730,14 @@ static void raid10d(mddev_t *mddev)
                                bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
                                        + rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
-                               bio->bi_rw = READ | do_sync;
+                               bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = raid10_end_read_request;
                                unplug = 1;
                                generic_make_request(bio);
                        }
                }
+               cond_resched();
        }
        if (unplug)
                unplug_slaves(mddev);
@@ -2095,7 +2201,6 @@ static int run(mddev_t *mddev)
        if (!conf->tmppage)
                goto out_free_conf;
 
-       conf->mddev = mddev;
        conf->raid_disks = mddev->raid_disks;
        conf->near_copies = nc;
        conf->far_copies = fc;
@@ -2132,6 +2237,7 @@ static int run(mddev_t *mddev)
                goto out_free_conf;
        }
 
+       conf->mddev = mddev;
        spin_lock_init(&conf->device_lock);
        mddev->queue->queue_lock = &conf->device_lock;
 
@@ -2154,12 +2260,14 @@ static int run(mddev_t *mddev)
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit ->max_sector to one PAGE, as
-                * a one page request is never in violation.
+                * violating it, so limit max_segments to 1, lying
+                * within a single page.
                 */
-               if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+                       blk_queue_max_segments(mddev->queue, 1);
+                       blk_queue_segment_boundary(mddev->queue,
+                                                  PAGE_CACHE_SIZE - 1);
+               }
 
                disk->head_position = 0;
        }
@@ -2274,13 +2382,6 @@ static void raid10_quiesce(mddev_t *mddev, int state)
                lower_barrier(conf);
                break;
        }
-       if (mddev->thread) {
-               if (mddev->bitmap)
-                       mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
-               else
-                       mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
-               md_wakeup_thread(mddev->thread);
-       }
 }
 
 static struct mdk_personality raid10_personality =
@@ -2314,6 +2415,7 @@ static void raid_exit(void)
 module_init(raid_init);
 module_exit(raid_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
 MODULE_ALIAS("md-personality-9"); /* RAID10 */
 MODULE_ALIAS("md-raid10");
 MODULE_ALIAS("md-level-10");