2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
7 * RAID-4/5/6 management functions.
8 * Thanks to Penguin Computing for making the RAID-6 development possible
9 * by donating a test server!
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 * The sequencing for updating the bitmap reliably is a little
25 * subtle (and I got it wrong the first time) so it deserves some
26 * explanation.
28 * We group bitmap updates into batches. Each batch has a number.
29 * We may write out several batches at once, but that isn't very important.
30 * conf->bm_write is the number of the last batch successfully written.
31 * conf->bm_flush is the number of the last batch that was closed to
32 * new additions.
33 * When we discover that we will need to write to any block in a stripe
34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
35 * the number of the batch it will be in. This is bm_flush+1.
36 * When we are ready to do a write, if that batch hasn't been written yet,
37 * we plug the array and queue the stripe for later.
38 * When an unplug happens, we increment bm_flush, thus closing the current
39 * batch.
40 * When we notice that bm_flush > bm_write, we write out all pending updates
41 * to the bitmap, and advance bm_write to where bm_flush was.
42 * This may occasionally write a bit out twice, but is sure never to
43 * lose any bits.
44 */
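/*
 * A minimal sketch of the batch bookkeeping described above; in the
 * code below the two counters appear as conf->seq_flush and
 * conf->seq_write. The struct and helpers here are invented purely
 * for illustration and are not part of the driver:
 *
 *	struct batch_state { int seq_write, seq_flush; };
 *
 *	// A stripe stamped with bm_seq may issue its writes only once
 *	// the batch it belongs to has reached the bitmap on disk.
 *	static int stripe_may_write(const struct batch_state *bs, int bm_seq)
 *	{
 *		return bs->seq_write - bm_seq >= 0;
 *	}
 *
 *	// On unplug: close the current batch. The raid5 daemon later
 *	// notices seq_flush > seq_write, writes out the pending bitmap
 *	// updates, and advances seq_write to seq_flush.
 *	static void close_batch(struct batch_state *bs)
 *	{
 *		bs->seq_flush++;
 *	}
 */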
46 #include <linux/module.h>
47 #include <linux/slab.h>
48 #include <linux/highmem.h>
49 #include <linux/bitops.h>
50 #include <linux/kthread.h>
51 #include <asm/atomic.h>
54 #include <linux/raid/bitmap.h>
60 #define NR_STRIPES 256
61 #define STRIPE_SIZE PAGE_SIZE
62 #define STRIPE_SHIFT (PAGE_SHIFT - 9)
63 #define STRIPE_SECTORS (STRIPE_SIZE>>9)
64 #define IO_THRESHOLD 1
65 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
66 #define HASH_MASK (NR_HASH - 1)
68 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
70 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
71 * order without overlap. There may be several bio's per stripe+device, and
72 * a bio could span several devices.
73 * When walking this list for a particular stripe+device, we must never proceed
74 * beyond a bio that extends past this device, as the next bio might no longer
75 * be valid.
76 * This macro is used to determine the 'next' bio in the list, given the sector
77 * of the current stripe+device
78 */
79 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
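/*
 * Illustrative use of r5_next_bio(): walking every bio queued against
 * one stripe+device without stepping past the device boundary (sketch
 * only; 'dev' stands for an r5dev and 'sector' for its start sector):
 *
 *	struct bio *b;
 *	for (b = dev->toread; b; b = r5_next_bio(b, sector))
 *		process(b);	// stops at a bio extending past the stripe
 */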
81 * The following can be used to debug the driver
84 #define RAID5_PARANOIA 1
85 #if RAID5_PARANOIA && defined(CONFIG_SMP)
86 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
87 #else
88 # define CHECK_DEVLOCK()
89 #endif
91 #define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
97 #if !RAID6_USE_EMPTY_ZERO_PAGE
98 /* In .bss so it's zeroed */
99 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
102 static inline int raid6_next_disk(int disk, int raid_disks)
104 disk++;
105 return (disk < raid_disks) ? disk : 0;
107 static void print_raid5_conf (raid5_conf_t *conf);
109 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
111 if (atomic_dec_and_test(&sh->count)) {
112 BUG_ON(!list_empty(&sh->lru));
113 BUG_ON(atomic_read(&conf->active_stripes)==0);
114 if (test_bit(STRIPE_HANDLE, &sh->state)) {
115 if (test_bit(STRIPE_DELAYED, &sh->state)) {
116 list_add_tail(&sh->lru, &conf->delayed_list);
117 blk_plug_device(conf->mddev->queue);
118 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
119 sh->bm_seq - conf->seq_write > 0) {
120 list_add_tail(&sh->lru, &conf->bitmap_list);
121 blk_plug_device(conf->mddev->queue);
123 clear_bit(STRIPE_BIT_DELAY, &sh->state);
124 list_add_tail(&sh->lru, &conf->handle_list);
126 md_wakeup_thread(conf->mddev->thread);
128 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
129 atomic_dec(&conf->preread_active_stripes);
130 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
131 md_wakeup_thread(conf->mddev->thread);
133 atomic_dec(&conf->active_stripes);
134 if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
135 list_add_tail(&sh->lru, &conf->inactive_list);
136 wake_up(&conf->wait_for_stripe);
141 static void release_stripe(struct stripe_head *sh)
143 raid5_conf_t *conf = sh->raid_conf;
146 spin_lock_irqsave(&conf->device_lock, flags);
147 __release_stripe(conf, sh);
148 spin_unlock_irqrestore(&conf->device_lock, flags);
151 static inline void remove_hash(struct stripe_head *sh)
153 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
155 hlist_del_init(&sh->hash);
158 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
160 struct hlist_head *hp = stripe_hash(conf, sh->sector);
162 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
165 hlist_add_head(&sh->hash, hp);
169 /* find an idle stripe, make sure it is unhashed, and return it. */
170 static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
172 struct stripe_head *sh = NULL;
173 struct list_head *first;
176 if (list_empty(&conf->inactive_list))
177 goto out;
178 first = conf->inactive_list.next;
179 sh = list_entry(first, struct stripe_head, lru);
180 list_del_init(first);
182 atomic_inc(&conf->active_stripes);
187 static void shrink_buffers(struct stripe_head *sh, int num)
192 for (i=0; i<num ; i++) {
196 sh->dev[i].page = NULL;
201 static int grow_buffers(struct stripe_head *sh, int num)
205 for (i=0; i<num; i++) {
206 struct page *page;
208 if (!(page = alloc_page(GFP_KERNEL))) {
209 return 1;
210 }
211 sh->dev[i].page = page;
216 static void raid5_build_block (struct stripe_head *sh, int i);
218 static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
220 raid5_conf_t *conf = sh->raid_conf;
223 BUG_ON(atomic_read(&sh->count) != 0);
224 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
227 PRINTK("init_stripe called, stripe %llu\n",
228 (unsigned long long)sh->sector);
238 for (i = sh->disks; i--; ) {
239 struct r5dev *dev = &sh->dev[i];
241 if (dev->toread || dev->towrite || dev->written ||
242 test_bit(R5_LOCKED, &dev->flags)) {
243 printk("sector=%llx i=%d %p %p %p %d\n",
244 (unsigned long long)sh->sector, i, dev->toread,
245 dev->towrite, dev->written,
246 test_bit(R5_LOCKED, &dev->flags));
250 raid5_build_block(sh, i);
252 insert_hash(conf, sh);
255 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
257 struct stripe_head *sh;
258 struct hlist_node *hn;
261 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
262 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
263 if (sh->sector == sector && sh->disks == disks)
265 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
269 static void unplug_slaves(mddev_t *mddev);
270 static void raid5_unplug_device(request_queue_t *q);
272 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
273 int pd_idx, int noblock)
275 struct stripe_head *sh;
277 PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
279 spin_lock_irq(&conf->device_lock);
282 wait_event_lock_irq(conf->wait_for_stripe,
284 conf->device_lock, /* nothing */);
285 sh = __find_stripe(conf, sector, disks);
287 if (!conf->inactive_blocked)
288 sh = get_free_stripe(conf);
289 if (noblock && sh == NULL)
290 break;
292 conf->inactive_blocked = 1;
293 wait_event_lock_irq(conf->wait_for_stripe,
294 !list_empty(&conf->inactive_list) &&
295 (atomic_read(&conf->active_stripes)
296 < (conf->max_nr_stripes *3/4)
297 || !conf->inactive_blocked),
299 raid5_unplug_device(conf->mddev->queue)
301 conf->inactive_blocked = 0;
303 init_stripe(sh, sector, pd_idx, disks);
305 if (atomic_read(&sh->count)) {
306 BUG_ON(!list_empty(&sh->lru));
308 if (!test_bit(STRIPE_HANDLE, &sh->state))
309 atomic_inc(&conf->active_stripes);
310 if (list_empty(&sh->lru) &&
311 !test_bit(STRIPE_EXPANDING, &sh->state))
313 list_del_init(&sh->lru);
316 } while (sh == NULL);
319 atomic_inc(&sh->count);
321 spin_unlock_irq(&conf->device_lock);
325 static int grow_one_stripe(raid5_conf_t *conf)
327 struct stripe_head *sh;
328 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
329 if (!sh)
330 return 0;
331 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
332 sh->raid_conf = conf;
333 spin_lock_init(&sh->lock);
335 if (grow_buffers(sh, conf->raid_disks)) {
336 shrink_buffers(sh, conf->raid_disks);
337 kmem_cache_free(conf->slab_cache, sh);
340 sh->disks = conf->raid_disks;
341 /* we just created an active stripe so... */
342 atomic_set(&sh->count, 1);
343 atomic_inc(&conf->active_stripes);
344 INIT_LIST_HEAD(&sh->lru);
349 static int grow_stripes(raid5_conf_t *conf, int num)
352 int devs = conf->raid_disks;
354 sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
355 sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
356 conf->active_name = 0;
357 sc = kmem_cache_create(conf->cache_name[conf->active_name],
358 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
362 conf->slab_cache = sc;
363 conf->pool_size = devs;
364 while (num--)
365 if (!grow_one_stripe(conf))
366 return 1;
367 return 0;
370 #ifdef CONFIG_MD_RAID5_RESHAPE
371 static int resize_stripes(raid5_conf_t *conf, int newsize)
373 /* Make all the stripes able to hold 'newsize' devices.
374 * New slots in each stripe get 'page' set to a new page.
376 * This happens in stages:
377 * 1/ create a new kmem_cache and allocate the required number of
379 * 2/ gather all the old stripe_heads and transfer the pages across
380 * to the new stripe_heads. This will have the side effect of
381 * freezing the array as once all stripe_heads have been collected,
382 * no IO will be possible. Old stripe heads are freed once their
383 * pages have been transferred over, and the old kmem_cache is
384 * freed when all stripes are done.
385 * 3/ reallocate conf->disks to be suitably bigger. If this fails,
386 * we simply return a failure status - no need to clean anything up.
387 * 4/ allocate new pages for the new slots in the new stripe_heads.
388 * If this fails, we don't bother trying to shrink the
389 * stripe_heads down again, we just leave them as they are.
390 * As each stripe_head is processed the new one is released into
391 * service.
393 * Once step2 is started, we cannot afford to wait for a write,
394 * so we use GFP_NOIO allocations.
395 */
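/*
 * The GFP_NOIO rule for steps 2-4 is not cosmetic: once stripes are
 * being collected the array is effectively frozen, so a GFP_KERNEL
 * allocation that tried to reclaim memory by writing pages back to
 * this array could never make progress, deadlocking the resize.
 */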
396 struct stripe_head *osh, *nsh;
397 LIST_HEAD(newstripes);
398 struct disk_info *ndisks;
403 if (newsize <= conf->pool_size)
404 return 0; /* never bother to shrink */
407 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
408 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
413 for (i = conf->max_nr_stripes; i; i--) {
414 nsh = kmem_cache_alloc(sc, GFP_KERNEL);
418 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
420 nsh->raid_conf = conf;
421 spin_lock_init(&nsh->lock);
423 list_add(&nsh->lru, &newstripes);
426 /* didn't get enough, give up */
427 while (!list_empty(&newstripes)) {
428 nsh = list_entry(newstripes.next, struct stripe_head, lru);
430 kmem_cache_free(sc, nsh);
432 kmem_cache_destroy(sc);
435 /* Step 2 - Must use GFP_NOIO now.
436 * OK, we have enough stripes, start collecting inactive
437 * stripes and copying them over
439 list_for_each_entry(nsh, &newstripes, lru) {
440 spin_lock_irq(&conf->device_lock);
441 wait_event_lock_irq(conf->wait_for_stripe,
442 !list_empty(&conf->inactive_list),
444 unplug_slaves(conf->mddev)
446 osh = get_free_stripe(conf);
447 spin_unlock_irq(&conf->device_lock);
448 atomic_set(&nsh->count, 1);
449 for(i=0; i<conf->pool_size; i++)
450 nsh->dev[i].page = osh->dev[i].page;
451 for( ; i<newsize; i++)
452 nsh->dev[i].page = NULL;
453 kmem_cache_free(conf->slab_cache, osh);
455 kmem_cache_destroy(conf->slab_cache);
458 * At this point, we are holding all the stripes so the array
459 * is completely stalled, so now is a good time to resize
462 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
464 for (i=0; i<conf->raid_disks; i++)
465 ndisks[i] = conf->disks[i];
467 conf->disks = ndisks;
471 /* Step 4, return new stripes to service */
472 while(!list_empty(&newstripes)) {
473 nsh = list_entry(newstripes.next, struct stripe_head, lru);
474 list_del_init(&nsh->lru);
475 for (i=conf->raid_disks; i < newsize; i++)
476 if (nsh->dev[i].page == NULL) {
477 struct page *p = alloc_page(GFP_NOIO);
478 nsh->dev[i].page = p;
484 /* critical section passed, GFP_NOIO no longer needed */
486 conf->slab_cache = sc;
487 conf->active_name = 1-conf->active_name;
488 conf->pool_size = newsize;
493 static int drop_one_stripe(raid5_conf_t *conf)
495 struct stripe_head *sh;
497 spin_lock_irq(&conf->device_lock);
498 sh = get_free_stripe(conf);
499 spin_unlock_irq(&conf->device_lock);
502 BUG_ON(atomic_read(&sh->count));
503 shrink_buffers(sh, conf->pool_size);
504 kmem_cache_free(conf->slab_cache, sh);
505 atomic_dec(&conf->active_stripes);
509 static void shrink_stripes(raid5_conf_t *conf)
511 while (drop_one_stripe(conf))
514 if (conf->slab_cache)
515 kmem_cache_destroy(conf->slab_cache);
516 conf->slab_cache = NULL;
519 static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
522 struct stripe_head *sh = bi->bi_private;
523 raid5_conf_t *conf = sh->raid_conf;
524 int disks = sh->disks, i;
525 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
530 for (i=0 ; i<disks; i++)
531 if (bi == &sh->dev[i].req)
532 break;
534 PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
535 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
546 spin_lock_irqsave(&conf->device_lock, flags);
547 /* we can return a buffer if we bypassed the cache or
548 * if the top buffer is not in highmem. If there are
549 * multiple buffers, leave the extra work to
550 * raid5d.
551 */
552 buffer = sh->bh_read[i];
554 (!PageHighMem(buffer->b_page)
555 || buffer->b_page == bh->b_page )
557 sh->bh_read[i] = buffer->b_reqnext;
558 buffer->b_reqnext = NULL;
561 spin_unlock_irqrestore(&conf->device_lock, flags);
562 if (sh->bh_page[i]==bh->b_page)
563 set_buffer_uptodate(bh);
565 if (buffer->b_page != bh->b_page)
566 memcpy(buffer->b_data, bh->b_data, bh->b_size);
567 buffer->b_end_io(buffer, 1);
570 set_bit(R5_UPTODATE, &sh->dev[i].flags);
572 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
573 printk(KERN_INFO "raid5: read error corrected!!\n");
574 clear_bit(R5_ReadError, &sh->dev[i].flags);
575 clear_bit(R5_ReWrite, &sh->dev[i].flags);
577 if (atomic_read(&conf->disks[i].rdev->read_errors))
578 atomic_set(&conf->disks[i].rdev->read_errors, 0);
581 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
582 atomic_inc(&conf->disks[i].rdev->read_errors);
583 if (conf->mddev->degraded)
584 printk(KERN_WARNING "raid5: read error not correctable.\n");
585 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
587 printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
588 else if (atomic_read(&conf->disks[i].rdev->read_errors)
589 > conf->max_nr_stripes)
591 "raid5: Too many read errors, failing device.\n");
595 set_bit(R5_ReadError, &sh->dev[i].flags);
597 clear_bit(R5_ReadError, &sh->dev[i].flags);
598 clear_bit(R5_ReWrite, &sh->dev[i].flags);
599 md_error(conf->mddev, conf->disks[i].rdev);
602 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
604 /* must restore b_page before unlocking buffer... */
605 if (sh->bh_page[i] != bh->b_page) {
606 bh->b_page = sh->bh_page[i];
607 bh->b_data = page_address(bh->b_page);
608 clear_buffer_uptodate(bh);
611 clear_bit(R5_LOCKED, &sh->dev[i].flags);
612 set_bit(STRIPE_HANDLE, &sh->state);
617 static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
620 struct stripe_head *sh = bi->bi_private;
621 raid5_conf_t *conf = sh->raid_conf;
622 int disks = sh->disks, i;
624 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
629 for (i=0 ; i<disks; i++)
630 if (bi == &sh->dev[i].req)
631 break;
633 PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
634 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
641 spin_lock_irqsave(&conf->device_lock, flags);
643 md_error(conf->mddev, conf->disks[i].rdev);
645 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
647 clear_bit(R5_LOCKED, &sh->dev[i].flags);
648 set_bit(STRIPE_HANDLE, &sh->state);
649 __release_stripe(conf, sh);
650 spin_unlock_irqrestore(&conf->device_lock, flags);
655 static sector_t compute_blocknr(struct stripe_head *sh, int i);
657 static void raid5_build_block (struct stripe_head *sh, int i)
659 struct r5dev *dev = &sh->dev[i];
662 dev->req.bi_io_vec = &dev->vec;
663 dev->req.bi_vcnt++;
664 dev->req.bi_max_vecs++;
665 dev->vec.bv_page = dev->page;
666 dev->vec.bv_len = STRIPE_SIZE;
667 dev->vec.bv_offset = 0;
669 dev->req.bi_sector = sh->sector;
670 dev->req.bi_private = sh;
673 dev->sector = compute_blocknr(sh, i);
676 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
678 char b[BDEVNAME_SIZE];
679 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
680 PRINTK("raid5: error called\n");
682 if (!test_bit(Faulty, &rdev->flags)) {
684 if (test_bit(In_sync, &rdev->flags)) {
685 conf->working_disks--;
687 conf->failed_disks++;
688 clear_bit(In_sync, &rdev->flags);
690 * if recovery was running, make sure it aborts.
692 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
694 set_bit(Faulty, &rdev->flags);
696 "raid5: Disk failure on %s, disabling device."
697 " Operation continuing on %d devices\n",
698 bdevname(rdev->bdev,b), conf->working_disks);
703 * Input: a 'big' sector number,
704 * Output: index of the data and parity disk, and the sector # in them.
706 static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
707 unsigned int data_disks, unsigned int * dd_idx,
708 unsigned int * pd_idx, raid5_conf_t *conf)
711 unsigned long chunk_number;
712 unsigned int chunk_offset;
714 int sectors_per_chunk = conf->chunk_size >> 9;
716 /* First compute the information on this sector */
719 * Compute the chunk number and the sector offset inside the chunk
721 chunk_offset = sector_div(r_sector, sectors_per_chunk);
722 chunk_number = r_sector;
723 BUG_ON(r_sector != chunk_number);
726 * Compute the stripe number
728 stripe = chunk_number / data_disks;
731 * Compute the data disk and parity disk indexes inside the stripe
733 *dd_idx = chunk_number % data_disks;
736 * Select the parity disk based on the user selected algorithm.
738 switch(conf->level) {
740 *pd_idx = data_disks;
743 switch (conf->algorithm) {
744 case ALGORITHM_LEFT_ASYMMETRIC:
745 *pd_idx = data_disks - stripe % raid_disks;
746 if (*dd_idx >= *pd_idx)
747 (*dd_idx)++;
748 break;
749 case ALGORITHM_RIGHT_ASYMMETRIC:
750 *pd_idx = stripe % raid_disks;
751 if (*dd_idx >= *pd_idx)
752 (*dd_idx)++;
753 break;
754 case ALGORITHM_LEFT_SYMMETRIC:
755 *pd_idx = data_disks - stripe % raid_disks;
756 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
758 case ALGORITHM_RIGHT_SYMMETRIC:
759 *pd_idx = stripe % raid_disks;
760 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
763 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
770 switch (conf->algorithm) {
771 case ALGORITHM_LEFT_ASYMMETRIC:
772 *pd_idx = raid_disks - 1 - (stripe % raid_disks);
773 if (*pd_idx == raid_disks-1)
774 (*dd_idx)++; /* Q D D D P */
775 else if (*dd_idx >= *pd_idx)
776 (*dd_idx) += 2; /* D D P Q D */
778 case ALGORITHM_RIGHT_ASYMMETRIC:
779 *pd_idx = stripe % raid_disks;
780 if (*pd_idx == raid_disks-1)
781 (*dd_idx)++; /* Q D D D P */
782 else if (*dd_idx >= *pd_idx)
783 (*dd_idx) += 2; /* D D P Q D */
785 case ALGORITHM_LEFT_SYMMETRIC:
786 *pd_idx = raid_disks - 1 - (stripe % raid_disks);
787 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
789 case ALGORITHM_RIGHT_SYMMETRIC:
790 *pd_idx = stripe % raid_disks;
791 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
794 printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
801 * Finally, compute the new sector number
802 */
803 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
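/*
 * Worked example of the mapping above, for a hypothetical 4-disk
 * RAID-5 (data_disks = 3), 64KiB chunks (sectors_per_chunk = 128) and
 * ALGORITHM_LEFT_SYMMETRIC:
 *
 *	r_sector     = 1000
 *	chunk_offset = 1000 % 128 = 104,  chunk_number = 7
 *	stripe       = 7 / 3 = 2,         dd_idx = 7 % 3 = 1
 *	pd_idx       = 3 - (2 % 4) = 1
 *	dd_idx       = (1 + 1 + 1) % 4 = 3
 *	new_sector   = 2 * 128 + 104 = 360
 *
 * i.e. array sector 1000 lives on disk 3 at device sector 360, with
 * the parity for that stripe on disk 1.
 */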
808 static sector_t compute_blocknr(struct stripe_head *sh, int i)
810 raid5_conf_t *conf = sh->raid_conf;
811 int raid_disks = sh->disks, data_disks = raid_disks - 1;
812 sector_t new_sector = sh->sector, check;
813 int sectors_per_chunk = conf->chunk_size >> 9;
816 int chunk_number, dummy1, dummy2, dd_idx = i;
820 chunk_offset = sector_div(new_sector, sectors_per_chunk);
822 BUG_ON(new_sector != stripe);
826 switch(conf->level) {
829 switch (conf->algorithm) {
830 case ALGORITHM_LEFT_ASYMMETRIC:
831 case ALGORITHM_RIGHT_ASYMMETRIC:
835 case ALGORITHM_LEFT_SYMMETRIC:
836 case ALGORITHM_RIGHT_SYMMETRIC:
839 i -= (sh->pd_idx + 1);
842 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
847 data_disks = raid_disks - 2;
848 if (i == raid6_next_disk(sh->pd_idx, raid_disks))
849 return 0; /* It is the Q disk */
850 switch (conf->algorithm) {
851 case ALGORITHM_LEFT_ASYMMETRIC:
852 case ALGORITHM_RIGHT_ASYMMETRIC:
853 if (sh->pd_idx == raid_disks-1)
855 else if (i > sh->pd_idx)
856 i -= 2; /* D D P Q D */
858 case ALGORITHM_LEFT_SYMMETRIC:
859 case ALGORITHM_RIGHT_SYMMETRIC:
860 if (sh->pd_idx == raid_disks-1)
866 i -= (sh->pd_idx + 2);
870 printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
876 chunk_number = stripe * data_disks + i;
877 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
879 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
880 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
881 printk(KERN_ERR "compute_blocknr: map not correct\n");
890 * Copy data between a page in the stripe cache, and one or more bion
891 * The page could align with the middle of the bio, or there could be
892 * several bion, each with several bio_vecs, which cover part of the page.
893 * Multiple bion are linked together on bi_next. There may be extras
894 * at the end of this list. We ignore them.
895 */
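/*
 * Example of the page_offset arithmetic in copy_data() below: if the
 * cache page starts at sector 360 and a bio starts at sector 356, then
 * page_offset = (356 - 360) * 512 = -2048, so the first 2048 bytes of
 * the bio lie before this page and are skipped via b_offset before any
 * copying starts.
 */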
896 static void copy_data(int frombio, struct bio *bio,
900 char *pa = page_address(page);
905 if (bio->bi_sector >= sector)
906 page_offset = (signed)(bio->bi_sector - sector) * 512;
908 page_offset = (signed)(sector - bio->bi_sector) * -512;
909 bio_for_each_segment(bvl, bio, i) {
910 int len = bio_iovec_idx(bio,i)->bv_len;
914 if (page_offset < 0) {
915 b_offset = -page_offset;
916 page_offset += b_offset;
920 if (len > 0 && page_offset + len > STRIPE_SIZE)
921 clen = STRIPE_SIZE - page_offset;
925 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
927 memcpy(pa+page_offset, ba+b_offset, clen);
929 memcpy(ba+b_offset, pa+page_offset, clen);
930 __bio_kunmap_atomic(ba, KM_USER0);
932 if (clen < len) /* hit end of page */
938 #define check_xor() do { \
939 if (count == MAX_XOR_BLOCKS) { \
940 xor_block(count, STRIPE_SIZE, ptr); \
946 static void compute_block(struct stripe_head *sh, int dd_idx)
948 int i, count, disks = sh->disks;
949 void *ptr[MAX_XOR_BLOCKS], *p;
951 PRINTK("compute_block, stripe %llu, idx %d\n",
952 (unsigned long long)sh->sector, dd_idx);
954 ptr[0] = page_address(sh->dev[dd_idx].page);
955 memset(ptr[0], 0, STRIPE_SIZE);
957 for (i = disks ; i--; ) {
958 if (i == dd_idx)
959 continue;
960 p = page_address(sh->dev[i].page);
961 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
964 printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
965 " not present\n", dd_idx,
966 (unsigned long long)sh->sector, i);
971 xor_block(count, STRIPE_SIZE, ptr);
972 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
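/*
 * compute_block() above is a direct use of the RAID-5 identity
 * D_missing = D_0 ^ D_1 ^ ... ^ P: xor every surviving block together
 * with the parity and the missing block falls out. Minimal userspace
 * sketch of the same idea (hypothetical buffers, not driver code):
 *
 *	static void xor_recover(unsigned char *dst, unsigned char **src,
 *				int nsrc, size_t len)
 *	{
 *		size_t j;
 *		int k;
 *
 *		memset(dst, 0, len);
 *		for (k = 0; k < nsrc; k++)
 *			for (j = 0; j < len; j++)
 *				dst[j] ^= src[k][j];
 *	}
 */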
975 static void compute_parity5(struct stripe_head *sh, int method)
977 raid5_conf_t *conf = sh->raid_conf;
978 int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
979 void *ptr[MAX_XOR_BLOCKS];
982 PRINTK("compute_parity5, stripe %llu, method %d\n",
983 (unsigned long long)sh->sector, method);
986 ptr[0] = page_address(sh->dev[pd_idx].page);
988 case READ_MODIFY_WRITE:
989 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
990 for (i=disks ; i-- ;) {
993 if (sh->dev[i].towrite &&
994 test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
995 ptr[count++] = page_address(sh->dev[i].page);
996 chosen = sh->dev[i].towrite;
997 sh->dev[i].towrite = NULL;
999 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1000 wake_up(&conf->wait_for_overlap);
1002 BUG_ON(sh->dev[i].written);
1003 sh->dev[i].written = chosen;
1008 case RECONSTRUCT_WRITE:
1009 memset(ptr[0], 0, STRIPE_SIZE);
1010 for (i= disks; i-- ;)
1011 if (i!=pd_idx && sh->dev[i].towrite) {
1012 chosen = sh->dev[i].towrite;
1013 sh->dev[i].towrite = NULL;
1015 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1016 wake_up(&conf->wait_for_overlap);
1018 BUG_ON(sh->dev[i].written);
1019 sh->dev[i].written = chosen;
1026 xor_block(count, STRIPE_SIZE, ptr);
1030 for (i = disks; i--;)
1031 if (sh->dev[i].written) {
1032 sector_t sector = sh->dev[i].sector;
1033 struct bio *wbi = sh->dev[i].written;
1034 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
1035 copy_data(1, wbi, sh->dev[i].page, sector);
1036 wbi = r5_next_bio(wbi, sector);
1039 set_bit(R5_LOCKED, &sh->dev[i].flags);
1040 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1044 case RECONSTRUCT_WRITE:
1048 ptr[count++] = page_address(sh->dev[i].page);
1052 case READ_MODIFY_WRITE:
1053 for (i = disks; i--;)
1054 if (sh->dev[i].written) {
1055 ptr[count++] = page_address(sh->dev[i].page);
1060 xor_block(count, STRIPE_SIZE, ptr);
1062 if (method != CHECK_PARITY) {
1063 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1064 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
1066 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1069 static void compute_parity6(struct stripe_head *sh, int method)
1071 raid6_conf_t *conf = sh->raid_conf;
1072 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
1074 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
1077 qd_idx = raid6_next_disk(pd_idx, disks);
1078 d0_idx = raid6_next_disk(qd_idx, disks);
1080 PRINTK("compute_parity, stripe %llu, method %d\n",
1081 (unsigned long long)sh->sector, method);
1084 case READ_MODIFY_WRITE:
1085 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */
1086 case RECONSTRUCT_WRITE:
1087 for (i= disks; i-- ;)
1088 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
1089 chosen = sh->dev[i].towrite;
1090 sh->dev[i].towrite = NULL;
1092 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1093 wake_up(&conf->wait_for_overlap);
1095 if (sh->dev[i].written) BUG();
1096 sh->dev[i].written = chosen;
1100 BUG(); /* Not implemented yet */
1103 for (i = disks; i--;)
1104 if (sh->dev[i].written) {
1105 sector_t sector = sh->dev[i].sector;
1106 struct bio *wbi = sh->dev[i].written;
1107 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
1108 copy_data(1, wbi, sh->dev[i].page, sector);
1109 wbi = r5_next_bio(wbi, sector);
1112 set_bit(R5_LOCKED, &sh->dev[i].flags);
1113 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1117 // case RECONSTRUCT_WRITE:
1118 // case CHECK_PARITY:
1119 // case UPDATE_PARITY:
1120 /* Note that unlike RAID-5, the ordering of the disks matters greatly. */
1121 /* FIX: Is this ordering of drives even remotely optimal? */
1125 ptrs[count++] = page_address(sh->dev[i].page);
1126 if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags))
1127 printk("block %d/%d not uptodate on parity calc\n", i,count);
1128 i = raid6_next_disk(i, disks);
1129 } while ( i != d0_idx );
1133 raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);
1136 case RECONSTRUCT_WRITE:
1137 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1138 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
1139 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
1140 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags);
1143 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
1144 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
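/*
 * gen_syndrome() above computes both parities in one pass over the
 * ordered pointer table: P is the plain xor of the data blocks, while
 * Q is the GF(2^8) sum of g^i * D_i for a fixed generator g. Having
 * two independent syndromes is what lets raid6_datap_recov() and
 * raid6_2data_recov() further down rebuild any two missing blocks.
 */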
1150 /* Compute one missing block */
1151 static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
1153 raid6_conf_t *conf = sh->raid_conf;
1154 int i, count, disks = conf->raid_disks;
1155 void *ptr[MAX_XOR_BLOCKS], *p;
1156 int pd_idx = sh->pd_idx;
1157 int qd_idx = raid6_next_disk(pd_idx, disks);
1159 PRINTK("compute_block_1, stripe %llu, idx %d\n",
1160 (unsigned long long)sh->sector, dd_idx);
1162 if ( dd_idx == qd_idx ) {
1163 /* We're actually computing the Q drive */
1164 compute_parity6(sh, UPDATE_PARITY);
1166 ptr[0] = page_address(sh->dev[dd_idx].page);
1167 if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
1169 for (i = disks ; i--; ) {
1170 if (i == dd_idx || i == qd_idx)
1171 continue;
1172 p = page_address(sh->dev[i].page);
1173 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
1176 printk("compute_block() %d, stripe %llu, %d"
1177 " not present\n", dd_idx,
1178 (unsigned long long)sh->sector, i);
1183 xor_block(count, STRIPE_SIZE, ptr);
1184 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
1185 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
1189 /* Compute two missing blocks */
1190 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
1192 raid6_conf_t *conf = sh->raid_conf;
1193 int i, count, disks = conf->raid_disks;
1194 int pd_idx = sh->pd_idx;
1195 int qd_idx = raid6_next_disk(pd_idx, disks);
1196 int d0_idx = raid6_next_disk(qd_idx, disks);
1199 /* faila and failb are disk numbers relative to d0_idx */
1200 /* pd_idx becomes disks-2 and qd_idx becomes disks-1 */
1201 faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
1202 failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;
1204 BUG_ON(faila == failb);
1205 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
1207 PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
1208 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
1210 if ( failb == disks-1 ) {
1211 /* Q disk is one of the missing disks */
1212 if ( faila == disks-2 ) {
1213 /* Missing P+Q, just recompute */
1214 compute_parity6(sh, UPDATE_PARITY);
1217 /* We're missing D+Q; recompute D from P */
1218 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
1219 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
1224 /* We're missing D+P or D+D; build pointer table */
1226 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
1232 ptrs[count++] = page_address(sh->dev[i].page);
1233 i = raid6_next_disk(i, disks);
1234 if (i != dd_idx1 && i != dd_idx2 &&
1235 !test_bit(R5_UPTODATE, &sh->dev[i].flags))
1236 printk("compute_2 with missing block %d/%d\n", count, i);
1237 } while ( i != d0_idx );
1239 if ( failb == disks-2 ) {
1240 /* We're missing D+P. */
1241 raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
1243 /* We're missing D+D. */
1244 raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
1247 /* Both the above update both missing blocks */
1248 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
1249 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
1256 * Each stripe/dev can have one or more bion attached.
1257 * toread/towrite point to the first in a chain.
1258 * The bi_next chain must be in order.
1259 */
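/*
 * The ordering invariant can be maintained with a plain sorted insert,
 * which is what add_stripe_bio() below does. Simplified sketch of the
 * list discipline (ignoring locking and the overlap check):
 *
 *	struct bio **bip = &dev->toread;
 *	while (*bip && (*bip)->bi_sector < bi->bi_sector)
 *		bip = &(*bip)->bi_next;
 *	bi->bi_next = *bip;	// splice in, preserving bi_sector order
 *	*bip = bi;
 */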
1260 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
1263 raid5_conf_t *conf = sh->raid_conf;
1266 PRINTK("adding bh b#%llu to stripe s#%llu\n",
1267 (unsigned long long)bi->bi_sector,
1268 (unsigned long long)sh->sector);
1271 spin_lock(&sh->lock);
1272 spin_lock_irq(&conf->device_lock);
1274 bip = &sh->dev[dd_idx].towrite;
1275 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
1278 bip = &sh->dev[dd_idx].toread;
1279 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
1280 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
1282 bip = & (*bip)->bi_next;
1284 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
1287 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
1291 bi->bi_phys_segments ++;
1292 spin_unlock_irq(&conf->device_lock);
1293 spin_unlock(&sh->lock);
1295 PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
1296 (unsigned long long)bi->bi_sector,
1297 (unsigned long long)sh->sector, dd_idx);
1299 if (conf->mddev->bitmap && firstwrite) {
1300 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
1302 sh->bm_seq = conf->seq_flush+1;
1303 set_bit(STRIPE_BIT_DELAY, &sh->state);
1307 /* check if page is covered */
1308 sector_t sector = sh->dev[dd_idx].sector;
1309 for (bi=sh->dev[dd_idx].towrite;
1310 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
1311 bi && bi->bi_sector <= sector;
1312 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
1313 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
1314 sector = bi->bi_sector + (bi->bi_size>>9);
1316 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
1317 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
1322 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
1323 spin_unlock_irq(&conf->device_lock);
1324 spin_unlock(&sh->lock);
1328 static void end_reshape(raid5_conf_t *conf);
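/*
 * Note on page_is_zero() below: it tests the first 4 bytes directly,
 * then memcmp()s the page against itself shifted by 4 bytes. Each byte
 * is thereby compared with the byte 4 positions before it, so the
 * comparison can only succeed when the whole page repeats its first
 * (zero) word - i.e. when the page is entirely zero.
 */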
1330 static int page_is_zero(struct page *p)
1332 char *a = page_address(p);
1333 return ((*(u32*)a) == 0 &&
1334 memcmp(a, a+4, STRIPE_SIZE-4)==0);
1337 static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
1339 int sectors_per_chunk = conf->chunk_size >> 9;
1340 sector_t x = stripe;
1342 int chunk_offset = sector_div(x, sectors_per_chunk);
1344 raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk
1345 + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf);
1351 * handle_stripe - do things to a stripe.
1353 * We lock the stripe and then examine the state of various bits
1354 * to see what needs to be done.
1355 * Possible results:
1356 * return some read requests which now have data
1357 * return some write requests which are safely on disc
1358 * schedule a read on some buffers
1359 * schedule a write of some buffers
1360 * return confirmation of parity correctness
1362 * Parity calculations are done inside the stripe lock
1363 * buffers are taken off read_list or write_list, and bh_cache buffers
1364 * get BH_Lock set before the stripe lock is released.
1368 static void handle_stripe5(struct stripe_head *sh)
1370 raid5_conf_t *conf = sh->raid_conf;
1371 int disks = sh->disks;
1372 struct bio *return_bi= NULL;
1375 int syncing, expanding, expanded;
1376 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
1377 int non_overwrite = 0;
1381 PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
1382 (unsigned long long)sh->sector, atomic_read(&sh->count),
1385 spin_lock(&sh->lock);
1386 clear_bit(STRIPE_HANDLE, &sh->state);
1387 clear_bit(STRIPE_DELAYED, &sh->state);
1389 syncing = test_bit(STRIPE_SYNCING, &sh->state);
1390 expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1391 expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
1392 /* Now to look around and see what can be done */
1395 for (i=disks; i--; ) {
1398 clear_bit(R5_Insync, &dev->flags);
1400 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
1401 i, dev->flags, dev->toread, dev->towrite, dev->written);
1402 /* maybe we can reply to a read */
1403 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
1404 struct bio *rbi, *rbi2;
1405 PRINTK("Return read for disc %d\n", i);
1406 spin_lock_irq(&conf->device_lock);
1409 if (test_and_clear_bit(R5_Overlap, &dev->flags))
1410 wake_up(&conf->wait_for_overlap);
1411 spin_unlock_irq(&conf->device_lock);
1412 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1413 copy_data(0, rbi, dev->page, dev->sector);
1414 rbi2 = r5_next_bio(rbi, dev->sector);
1415 spin_lock_irq(&conf->device_lock);
1416 if (--rbi->bi_phys_segments == 0) {
1417 rbi->bi_next = return_bi;
1420 spin_unlock_irq(&conf->device_lock);
1425 /* now count some things */
1426 if (test_bit(R5_LOCKED, &dev->flags)) locked++;
1427 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
1430 if (dev->toread) to_read++;
1433 if (!test_bit(R5_OVERWRITE, &dev->flags))
1436 if (dev->written) written++;
1437 rdev = rcu_dereference(conf->disks[i].rdev);
1438 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
1439 /* The ReadError flag will just be confusing now */
1440 clear_bit(R5_ReadError, &dev->flags);
1441 clear_bit(R5_ReWrite, &dev->flags);
1443 if (!rdev || !test_bit(In_sync, &rdev->flags)
1444 || test_bit(R5_ReadError, &dev->flags)) {
1448 set_bit(R5_Insync, &dev->flags);
1451 PRINTK("locked=%d uptodate=%d to_read=%d"
1452 " to_write=%d failed=%d failed_num=%d\n",
1453 locked, uptodate, to_read, to_write, failed, failed_num);
1455 /* check if the array has lost two devices and, if so, some requests might
1456 * need to be failed */
1457 if (failed > 1 && to_read+to_write+written) {
1458 for (i=disks; i--; ) {
1461 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1464 rdev = rcu_dereference(conf->disks[i].rdev);
1465 if (rdev && test_bit(In_sync, &rdev->flags))
1466 /* multiple read failures in one stripe */
1467 md_error(conf->mddev, rdev);
1471 spin_lock_irq(&conf->device_lock);
1472 /* fail all writes first */
1473 bi = sh->dev[i].towrite;
1474 sh->dev[i].towrite = NULL;
1475 if (bi) { to_write--; bitmap_end = 1; }
1477 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1478 wake_up(&conf->wait_for_overlap);
1480 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1481 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1482 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1483 if (--bi->bi_phys_segments == 0) {
1484 md_write_end(conf->mddev);
1485 bi->bi_next = return_bi;
1490 /* and fail all 'written' */
1491 bi = sh->dev[i].written;
1492 sh->dev[i].written = NULL;
1493 if (bi) bitmap_end = 1;
1494 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
1495 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1496 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1497 if (--bi->bi_phys_segments == 0) {
1498 md_write_end(conf->mddev);
1499 bi->bi_next = return_bi;
1505 /* fail any reads if this device is non-operational */
1506 if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
1507 test_bit(R5_ReadError, &sh->dev[i].flags)) {
1508 bi = sh->dev[i].toread;
1509 sh->dev[i].toread = NULL;
1510 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1511 wake_up(&conf->wait_for_overlap);
1513 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1514 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1515 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1516 if (--bi->bi_phys_segments == 0) {
1517 bi->bi_next = return_bi;
1523 spin_unlock_irq(&conf->device_lock);
1525 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1526 STRIPE_SECTORS, 0, 0);
1529 if (failed > 1 && syncing) {
1530 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
1531 clear_bit(STRIPE_SYNCING, &sh->state);
1535 /* might be able to return some write requests if the parity block
1536 * is safe, or on a failed drive
1538 dev = &sh->dev[sh->pd_idx];
1540 ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
1541 test_bit(R5_UPTODATE, &dev->flags))
1542 || (failed == 1 && failed_num == sh->pd_idx))
1544 /* any written block on an uptodate or failed drive can be returned.
1545 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
1546 * never LOCKED, so we don't need to test 'failed' directly.
1548 for (i=disks; i--; )
1549 if (sh->dev[i].written) {
1551 if (!test_bit(R5_LOCKED, &dev->flags) &&
1552 test_bit(R5_UPTODATE, &dev->flags) ) {
1553 /* We can return any write requests */
1554 struct bio *wbi, *wbi2;
1556 PRINTK("Return write for disc %d\n", i);
1557 spin_lock_irq(&conf->device_lock);
1559 dev->written = NULL;
1560 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1561 wbi2 = r5_next_bio(wbi, dev->sector);
1562 if (--wbi->bi_phys_segments == 0) {
1563 md_write_end(conf->mddev);
1564 wbi->bi_next = return_bi;
1569 if (dev->towrite == NULL)
1571 spin_unlock_irq(&conf->device_lock);
1573 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1575 !test_bit(STRIPE_DEGRADED, &sh->state), 0);
1580 /* Now we might consider reading some blocks, either to check/generate
1581 * parity, or to satisfy requests
1582 * or to load a block that is being partially written.
1584 if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) {
1585 for (i=disks; i--;) {
1587 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1589 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1592 (failed && (sh->dev[failed_num].toread ||
1593 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
1596 /* we would like to get this block, possibly
1597 * by computing it, but we might not be able to
1599 if (uptodate == disks-1) {
1600 PRINTK("Computing block %d\n", i);
1601 compute_block(sh, i);
1603 } else if (test_bit(R5_Insync, &dev->flags)) {
1604 set_bit(R5_LOCKED, &dev->flags);
1605 set_bit(R5_Wantread, &dev->flags);
1607 /* if I am just reading this block and we don't have
1608 a failed drive, or any pending writes then sidestep the cache */
1609 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
1610 ! syncing && !failed && !to_write) {
1611 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
1612 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
1616 PRINTK("Reading block %d (sync=%d)\n",
1621 set_bit(STRIPE_HANDLE, &sh->state);
1624 /* now to consider writing and what else, if anything should be read */
1627 for (i=disks ; i--;) {
1628 /* would I have to read this buffer for read_modify_write */
1630 if ((dev->towrite || i == sh->pd_idx) &&
1631 (!test_bit(R5_LOCKED, &dev->flags)
1633 || sh->bh_page[i]!=bh->b_page
1636 !test_bit(R5_UPTODATE, &dev->flags)) {
1637 if (test_bit(R5_Insync, &dev->flags)
1638 /* && !(!mddev->insync && i == sh->pd_idx) */
1641 else rmw += 2*disks; /* cannot read it */
1643 /* Would I have to read this buffer for reconstruct_write */
1644 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1645 (!test_bit(R5_LOCKED, &dev->flags)
1647 || sh->bh_page[i] != bh->b_page
1650 !test_bit(R5_UPTODATE, &dev->flags)) {
1651 if (test_bit(R5_Insync, &dev->flags)) rcw++;
1652 else rcw += 2*disks;
1655 PRINTK("for sector %llu, rmw=%d rcw=%d\n",
1656 (unsigned long long)sh->sector, rmw, rcw);
1657 set_bit(STRIPE_HANDLE, &sh->state);
1658 if (rmw < rcw && rmw > 0)
1659 /* prefer read-modify-write, but need to get some data */
1660 for (i=disks; i--;) {
1662 if ((dev->towrite || i == sh->pd_idx) &&
1663 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1664 test_bit(R5_Insync, &dev->flags)) {
1665 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1667 PRINTK("Read_old block %d for r-m-w\n", i);
1668 set_bit(R5_LOCKED, &dev->flags);
1669 set_bit(R5_Wantread, &dev->flags);
1672 set_bit(STRIPE_DELAYED, &sh->state);
1673 set_bit(STRIPE_HANDLE, &sh->state);
1677 if (rcw <= rmw && rcw > 0)
1678 /* want reconstruct write, but need to get some data */
1679 for (i=disks; i--;) {
1681 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1682 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1683 test_bit(R5_Insync, &dev->flags)) {
1684 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1686 PRINTK("Read_old block %d for Reconstruct\n", i);
1687 set_bit(R5_LOCKED, &dev->flags);
1688 set_bit(R5_Wantread, &dev->flags);
1691 set_bit(STRIPE_DELAYED, &sh->state);
1692 set_bit(STRIPE_HANDLE, &sh->state);
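/*
 * Example of the rmw/rcw costing above: on a hypothetical 5-disk array
 * (4 data + parity), rewriting one data block needs the old data and
 * old parity for read-modify-write (rmw == 2) but the three untouched
 * data blocks for reconstruct-write (rcw == 3), so rmw is preferred;
 * rewriting three of the four data blocks flips this to rmw == 4
 * vs. rcw == 1 and reconstruct-write wins.
 */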
1696 /* now if nothing is locked, and if we have enough data, we can start a write request */
1697 if (locked == 0 && (rcw == 0 ||rmw == 0) &&
1698 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
1699 PRINTK("Computing parity...\n");
1700 compute_parity5(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
1701 /* now every locked buffer is ready to be written */
1703 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
1704 PRINTK("Writing block %d\n", i);
1706 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1707 if (!test_bit(R5_Insync, &sh->dev[i].flags)
1708 || (i==sh->pd_idx && failed == 0))
1709 set_bit(STRIPE_INSYNC, &sh->state);
1711 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1712 atomic_dec(&conf->preread_active_stripes);
1713 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
1714 md_wakeup_thread(conf->mddev->thread);
1719 /* maybe we need to check and possibly fix the parity for this stripe
1720 * Any reads will already have been scheduled, so we just see if enough data
1721 * is available
1722 */
1723 if (syncing && locked == 0 &&
1724 !test_bit(STRIPE_INSYNC, &sh->state)) {
1725 set_bit(STRIPE_HANDLE, &sh->state);
1727 BUG_ON(uptodate != disks);
1728 compute_parity5(sh, CHECK_PARITY);
1730 if (page_is_zero(sh->dev[sh->pd_idx].page)) {
1731 /* parity is correct (on disc, not in buffer any more) */
1732 set_bit(STRIPE_INSYNC, &sh->state);
1734 conf->mddev->resync_mismatches += STRIPE_SECTORS;
1735 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1736 /* don't try to repair!! */
1737 set_bit(STRIPE_INSYNC, &sh->state);
1739 compute_block(sh, sh->pd_idx);
1744 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1745 /* either failed parity check, or recovery is happening */
1747 failed_num = sh->pd_idx;
1748 dev = &sh->dev[failed_num];
1749 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
1750 BUG_ON(uptodate != disks);
1752 set_bit(R5_LOCKED, &dev->flags);
1753 set_bit(R5_Wantwrite, &dev->flags);
1754 clear_bit(STRIPE_DEGRADED, &sh->state);
1756 set_bit(STRIPE_INSYNC, &sh->state);
1759 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1760 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1761 clear_bit(STRIPE_SYNCING, &sh->state);
1764 /* If the failed drive is just a ReadError, then we might need to progress
1765 * the repair/check process
1767 if (failed == 1 && ! conf->mddev->ro &&
1768 test_bit(R5_ReadError, &sh->dev[failed_num].flags)
1769 && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
1770 && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
1772 dev = &sh->dev[failed_num];
1773 if (!test_bit(R5_ReWrite, &dev->flags)) {
1774 set_bit(R5_Wantwrite, &dev->flags);
1775 set_bit(R5_ReWrite, &dev->flags);
1776 set_bit(R5_LOCKED, &dev->flags);
1779 /* let's read it back */
1780 set_bit(R5_Wantread, &dev->flags);
1781 set_bit(R5_LOCKED, &dev->flags);
1786 if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
1787 /* Need to write out all blocks after computing parity */
1788 sh->disks = conf->raid_disks;
1789 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
1790 compute_parity5(sh, RECONSTRUCT_WRITE);
1791 for (i= conf->raid_disks; i--;) {
1792 set_bit(R5_LOCKED, &sh->dev[i].flags);
1794 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1796 clear_bit(STRIPE_EXPANDING, &sh->state);
1797 } else if (expanded) {
1798 clear_bit(STRIPE_EXPAND_READY, &sh->state);
1799 atomic_dec(&conf->reshape_stripes);
1800 wake_up(&conf->wait_for_overlap);
1801 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
1804 if (expanding && locked == 0) {
1805 /* We have read all the blocks in this stripe and now we need to
1806 * copy some of them into a target stripe for expand.
1808 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1809 for (i=0; i< sh->disks; i++)
1810 if (i != sh->pd_idx) {
1811 int dd_idx, pd_idx, j;
1812 struct stripe_head *sh2;
1814 sector_t bn = compute_blocknr(sh, i);
1815 sector_t s = raid5_compute_sector(bn, conf->raid_disks,
1817 &dd_idx, &pd_idx, conf);
1818 sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1);
1820 /* so far only the early blocks of this stripe
1821 * have been requested. When later blocks
1822 * get requested, we will try again
1825 if(!test_bit(STRIPE_EXPANDING, &sh2->state) ||
1826 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
1827 /* must have already done this block */
1828 release_stripe(sh2);
1831 memcpy(page_address(sh2->dev[dd_idx].page),
1832 page_address(sh->dev[i].page),
1834 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
1835 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
1836 for (j=0; j<conf->raid_disks; j++)
1837 if (j != sh2->pd_idx &&
1838 !test_bit(R5_Expanded, &sh2->dev[j].flags))
1840 if (j == conf->raid_disks) {
1841 set_bit(STRIPE_EXPAND_READY, &sh2->state);
1842 set_bit(STRIPE_HANDLE, &sh2->state);
1844 release_stripe(sh2);
1848 spin_unlock(&sh->lock);
1850 while ((bi=return_bi)) {
1851 int bytes = bi->bi_size;
1853 return_bi = bi->bi_next;
1856 bi->bi_end_io(bi, bytes, 0);
1858 for (i=disks; i-- ;) {
1862 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1864 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1869 bi = &sh->dev[i].req;
1873 bi->bi_end_io = raid5_end_write_request;
1875 bi->bi_end_io = raid5_end_read_request;
1878 rdev = rcu_dereference(conf->disks[i].rdev);
1879 if (rdev && test_bit(Faulty, &rdev->flags))
1882 atomic_inc(&rdev->nr_pending);
1886 if (syncing || expanding || expanded)
1887 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1889 bi->bi_bdev = rdev->bdev;
1890 PRINTK("for %llu schedule op %ld on disc %d\n",
1891 (unsigned long long)sh->sector, bi->bi_rw, i);
1892 atomic_inc(&sh->count);
1893 bi->bi_sector = sh->sector + rdev->data_offset;
1894 bi->bi_flags = 1 << BIO_UPTODATE;
1896 bi->bi_max_vecs = 1;
1898 bi->bi_io_vec = &sh->dev[i].vec;
1899 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1900 bi->bi_io_vec[0].bv_offset = 0;
1901 bi->bi_size = STRIPE_SIZE;
1904 test_bit(R5_ReWrite, &sh->dev[i].flags))
1905 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1906 generic_make_request(bi);
1909 set_bit(STRIPE_DEGRADED, &sh->state);
1910 PRINTK("skip op %ld on disc %d for sector %llu\n",
1911 bi->bi_rw, i, (unsigned long long)sh->sector);
1912 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1913 set_bit(STRIPE_HANDLE, &sh->state);
1918 static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
1920 raid6_conf_t *conf = sh->raid_conf;
1921 int disks = conf->raid_disks;
1922 struct bio *return_bi= NULL;
1926 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
1927 int non_overwrite = 0;
1928 int failed_num[2] = {0, 0};
1929 struct r5dev *dev, *pdev, *qdev;
1930 int pd_idx = sh->pd_idx;
1931 int qd_idx = raid6_next_disk(pd_idx, disks);
1932 int p_failed, q_failed;
1934 PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n",
1935 (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count),
1938 spin_lock(&sh->lock);
1939 clear_bit(STRIPE_HANDLE, &sh->state);
1940 clear_bit(STRIPE_DELAYED, &sh->state);
1942 syncing = test_bit(STRIPE_SYNCING, &sh->state);
1943 /* Now to look around and see what can be done */
1946 for (i=disks; i--; ) {
1949 clear_bit(R5_Insync, &dev->flags);
1951 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
1952 i, dev->flags, dev->toread, dev->towrite, dev->written);
1953 /* maybe we can reply to a read */
1954 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
1955 struct bio *rbi, *rbi2;
1956 PRINTK("Return read for disc %d\n", i);
1957 spin_lock_irq(&conf->device_lock);
1960 if (test_and_clear_bit(R5_Overlap, &dev->flags))
1961 wake_up(&conf->wait_for_overlap);
1962 spin_unlock_irq(&conf->device_lock);
1963 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1964 copy_data(0, rbi, dev->page, dev->sector);
1965 rbi2 = r5_next_bio(rbi, dev->sector);
1966 spin_lock_irq(&conf->device_lock);
1967 if (--rbi->bi_phys_segments == 0) {
1968 rbi->bi_next = return_bi;
1971 spin_unlock_irq(&conf->device_lock);
1976 /* now count some things */
1977 if (test_bit(R5_LOCKED, &dev->flags)) locked++;
1978 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
1981 if (dev->toread) to_read++;
1984 if (!test_bit(R5_OVERWRITE, &dev->flags))
1987 if (dev->written) written++;
1988 rdev = rcu_dereference(conf->disks[i].rdev);
1989 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
1990 /* The ReadError flag will just be confusing now */
1991 clear_bit(R5_ReadError, &dev->flags);
1992 clear_bit(R5_ReWrite, &dev->flags);
1994 if (!rdev || !test_bit(In_sync, &rdev->flags)
1995 || test_bit(R5_ReadError, &dev->flags)) {
1997 failed_num[failed] = i;
2000 set_bit(R5_Insync, &dev->flags);
2003 PRINTK("locked=%d uptodate=%d to_read=%d"
2004 " to_write=%d failed=%d failed_num=%d,%d\n",
2005 locked, uptodate, to_read, to_write, failed,
2006 failed_num[0], failed_num[1]);
2007 /* check if the array has lost >2 devices and, if so, some requests might
2008 * need to be failed
2009 */
2010 if (failed > 2 && to_read+to_write+written) {
2011 for (i=disks; i--; ) {
2014 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2017 rdev = rcu_dereference(conf->disks[i].rdev);
2018 if (rdev && test_bit(In_sync, &rdev->flags))
2019 /* multiple read failures in one stripe */
2020 md_error(conf->mddev, rdev);
2024 spin_lock_irq(&conf->device_lock);
2025 /* fail all writes first */
2026 bi = sh->dev[i].towrite;
2027 sh->dev[i].towrite = NULL;
2028 if (bi) { to_write--; bitmap_end = 1; }
2030 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2031 wake_up(&conf->wait_for_overlap);
2033 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
2034 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2035 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2036 if (--bi->bi_phys_segments == 0) {
2037 md_write_end(conf->mddev);
2038 bi->bi_next = return_bi;
2043 /* and fail all 'written' */
2044 bi = sh->dev[i].written;
2045 sh->dev[i].written = NULL;
2046 if (bi) bitmap_end = 1;
2047 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
2048 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2049 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2050 if (--bi->bi_phys_segments == 0) {
2051 md_write_end(conf->mddev);
2052 bi->bi_next = return_bi;
2058 /* fail any reads if this device is non-operational */
2059 if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2060 test_bit(R5_ReadError, &sh->dev[i].flags)) {
2061 bi = sh->dev[i].toread;
2062 sh->dev[i].toread = NULL;
2063 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2064 wake_up(&conf->wait_for_overlap);
2066 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
2067 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2068 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2069 if (--bi->bi_phys_segments == 0) {
2070 bi->bi_next = return_bi;
2076 spin_unlock_irq(&conf->device_lock);
2078 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2079 STRIPE_SECTORS, 0, 0);
2082 if (failed > 2 && syncing) {
2083 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
2084 clear_bit(STRIPE_SYNCING, &sh->state);
2089 * might be able to return some write requests if the parity blocks
2090 * are safe, or on a failed drive
2092 pdev = &sh->dev[pd_idx];
2093 p_failed = (failed >= 1 && failed_num[0] == pd_idx)
2094 || (failed >= 2 && failed_num[1] == pd_idx);
2095 qdev = &sh->dev[qd_idx];
2096 q_failed = (failed >= 1 && failed_num[0] == qd_idx)
2097 || (failed >= 2 && failed_num[1] == qd_idx);
2100 ( p_failed || ((test_bit(R5_Insync, &pdev->flags)
2101 && !test_bit(R5_LOCKED, &pdev->flags)
2102 && test_bit(R5_UPTODATE, &pdev->flags))) ) &&
2103 ( q_failed || ((test_bit(R5_Insync, &qdev->flags)
2104 && !test_bit(R5_LOCKED, &qdev->flags)
2105 && test_bit(R5_UPTODATE, &qdev->flags))) ) ) {
2106 /* any written block on an uptodate or failed drive can be
2107 * returned. Note that if we 'wrote' to a failed drive,
2108 * it will be UPTODATE, but never LOCKED, so we don't need
2109 * to test 'failed' directly.
2111 for (i=disks; i--; )
2112 if (sh->dev[i].written) {
2114 if (!test_bit(R5_LOCKED, &dev->flags) &&
2115 test_bit(R5_UPTODATE, &dev->flags) ) {
2116 /* We can return any write requests */
2118 struct bio *wbi, *wbi2;
2119 PRINTK("Return write for stripe %llu disc %d\n",
2120 (unsigned long long)sh->sector, i);
2121 spin_lock_irq(&conf->device_lock);
2123 dev->written = NULL;
2124 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
2125 wbi2 = r5_next_bio(wbi, dev->sector);
2126 if (--wbi->bi_phys_segments == 0) {
2127 md_write_end(conf->mddev);
2128 wbi->bi_next = return_bi;
2133 if (dev->towrite == NULL)
2135 spin_unlock_irq(&conf->device_lock);
2137 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2139 !test_bit(STRIPE_DEGRADED, &sh->state), 0);
2144 /* Now we might consider reading some blocks, either to check/generate
2145 * parity, or to satisfy requests
2146 * or to load a block that is being partially written.
2148 if (to_read || non_overwrite || (to_write && failed) || (syncing && (uptodate < disks))) {
2149 for (i=disks; i--;) {
2151 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
2153 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2155 (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
2156 (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
2159 /* we would like to get this block, possibly
2160 * by computing it, but we might not be able to
2162 if (uptodate == disks-1) {
2163 PRINTK("Computing stripe %llu block %d\n",
2164 (unsigned long long)sh->sector, i);
2165 compute_block_1(sh, i, 0);
2167 } else if ( uptodate == disks-2 && failed >= 2 ) {
2168 /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
2170 for (other=disks; other--;) {
2173 if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) )
2177 PRINTK("Computing stripe %llu blocks %d,%d\n",
2178 (unsigned long long)sh->sector, i, other);
2179 compute_block_2(sh, i, other);
2181 } else if (test_bit(R5_Insync, &dev->flags)) {
2182 set_bit(R5_LOCKED, &dev->flags);
2183 set_bit(R5_Wantread, &dev->flags);
2184 #if 0
2185 /* if I am just reading this block and we don't have
2186 a failed drive, or any pending writes then sidestep the cache */
2187 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
2188 ! syncing && !failed && !to_write) {
2189 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
2190 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
2191 }
2192 #endif
2193 locked++;
2194 PRINTK("Reading block %d (sync=%d)\n",
2199 set_bit(STRIPE_HANDLE, &sh->state);
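/* Illustrative note, not from the original source: compute_block_2() used
 * above implements the standard RAID-6 double-failure recovery over GF(2^8).
 * With generator g, P is the xor sum of the data blocks and Q is the sum of
 * g^i * D_i. For failed data disks x < y, partial syndromes Pxy and Qxy are
 * computed over the surviving data blocks, and then, roughly:
 *
 *	Dx = A * (P ^ Pxy) ^ B * (Q ^ Qxy)
 *	Dy = (P ^ Pxy) ^ Dx
 *
 * where, using GF(2^8) multiplication and division,
 * A = g^(y-x) / (g^(y-x) ^ 1) and B = g^(-x) / (g^(y-x) ^ 1) are constants
 * that depend only on x and y, which is why the two-failure case is so much
 * more expensive than a plain xor reconstruction.
 */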
2202 /* now to consider writing and what else, if anything should be read */
2204 int rcw=0, must_compute=0;
2205 for (i=disks ; i--;) {
2207 /* Would I have to read this buffer for reconstruct_write */
2208 if (!test_bit(R5_OVERWRITE, &dev->flags)
2209 && i != pd_idx && i != qd_idx
2210 && (!test_bit(R5_LOCKED, &dev->flags)
2211 #if 0
2212 || sh->bh_page[i] != bh->b_page
2213 #endif
2214 ) &&
2215 !test_bit(R5_UPTODATE, &dev->flags)) {
2216 if (test_bit(R5_Insync, &dev->flags)) rcw++;
2218 PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags);
2223 PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
2224 (unsigned long long)sh->sector, rcw, must_compute);
2225 set_bit(STRIPE_HANDLE, &sh->state);
2228 /* want reconstruct write, but need to get some data */
2229 for (i=disks; i--;) {
2231 if (!test_bit(R5_OVERWRITE, &dev->flags)
2232 && !(failed == 0 && (i == pd_idx || i == qd_idx))
2233 && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
2234 test_bit(R5_Insync, &dev->flags)) {
2235 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
2237 PRINTK("Read_old stripe %llu block %d for Reconstruct\n",
2238 (unsigned long long)sh->sector, i);
2239 set_bit(R5_LOCKED, &dev->flags);
2240 set_bit(R5_Wantread, &dev->flags);
2243 PRINTK("Request delayed stripe %llu block %d for Reconstruct\n",
2244 (unsigned long long)sh->sector, i);
2245 set_bit(STRIPE_DELAYED, &sh->state);
2246 set_bit(STRIPE_HANDLE, &sh->state);
2250 /* now if nothing is locked, and if we have enough data, we can start a write request */
2251 if (locked == 0 && rcw == 0 &&
2252 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
2253 if ( must_compute > 0 ) {
2254 /* We have failed blocks and need to compute them */
2255 switch (failed) {
2256 case 0: BUG();
2257 case 1: compute_block_1(sh, failed_num[0], 0); break;
2258 case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
2259 default: BUG(); /* This request should have been failed? */
2263 PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector);
2264 compute_parity6(sh, RECONSTRUCT_WRITE);
2265 /* now every locked buffer is ready to be written */
2267 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
2268 PRINTK("Writing stripe %llu block %d\n",
2269 (unsigned long long)sh->sector, i);
2271 set_bit(R5_Wantwrite, &sh->dev[i].flags);
2273 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
2274 set_bit(STRIPE_INSYNC, &sh->state);
2276 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2277 atomic_dec(&conf->preread_active_stripes);
2278 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
2279 md_wakeup_thread(conf->mddev->thread);
2284 /* maybe we need to check and possibly fix the parity for this stripe
2285 * Any reads will already have been scheduled, so we just see if enough data
2286 * is available
2287 */
2288 if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
2289 int update_p = 0, update_q = 0;
2292 set_bit(STRIPE_HANDLE, &sh->state);
2295 BUG_ON(uptodate < disks);
2296 /* Want to check and possibly repair P and Q.
2297 * However there could be one 'failed' device, in which
2298 * case we can only check one of them, possibly using the
2299 * other to generate missing data
2300 */
2302 /* If !tmp_page, we cannot do the calculations,
2303 * but as we have set STRIPE_HANDLE, we will soon be called
2304 * by handle_stripe with a tmp_page - just wait until then.
2305 */
2307 if (failed == q_failed) {
2308 /* The only possible failed device holds 'Q', so it makes
2309 * sense to check P (if anything else had failed, we would
2310 * have used P to recreate it).
2311 */
2312 compute_block_1(sh, pd_idx, 1);
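/* With its third argument set, compute_block_1() skips zeroing the target
 * page, so the xor of the surviving blocks accumulates on top of the old P;
 * the page is therefore left all-zero iff the on-disk parity was correct,
 * which is what the page_is_zero() test below relies on.
 */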
2313 if (!page_is_zero(sh->dev[pd_idx].page)) {
2314 compute_block_1(sh,pd_idx,0);
2318 if (!q_failed && failed < 2) {
2319 /* q is not failed, and we didn't use it to generate
2320 * anything, so it makes sense to check it
2322 memcpy(page_address(tmp_page),
2323 page_address(sh->dev[qd_idx].page),
2324 STRIPE_SIZE);
2325 compute_parity6(sh, UPDATE_PARITY);
2326 if (memcmp(page_address(tmp_page),
2327 page_address(sh->dev[qd_idx].page),
2328 STRIPE_SIZE) != 0) {
2329 clear_bit(STRIPE_INSYNC, &sh->state);
2333 if (update_p || update_q) {
2334 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2335 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2336 /* don't try to repair!! */
2337 update_p = update_q = 0;
2340 /* now write out any block on a failed drive,
2341 * or P or Q if they need it
2342 */
2345 dev = &sh->dev[failed_num[1]];
2347 set_bit(R5_LOCKED, &dev->flags);
2348 set_bit(R5_Wantwrite, &dev->flags);
2351 dev = &sh->dev[failed_num[0]];
2353 set_bit(R5_LOCKED, &dev->flags);
2354 set_bit(R5_Wantwrite, &dev->flags);
2358 dev = &sh->dev[pd_idx];
2360 set_bit(R5_LOCKED, &dev->flags);
2361 set_bit(R5_Wantwrite, &dev->flags);
2364 dev = &sh->dev[qd_idx];
2366 set_bit(R5_LOCKED, &dev->flags);
2367 set_bit(R5_Wantwrite, &dev->flags);
2369 clear_bit(STRIPE_DEGRADED, &sh->state);
2371 set_bit(STRIPE_INSYNC, &sh->state);
2375 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
2376 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
2377 clear_bit(STRIPE_SYNCING, &sh->state);
2380 /* If the failed drives just have a ReadError, then we might need
2381 * to progress the repair/check process
2382 */
2383 if (failed <= 2 && ! conf->mddev->ro)
2384 for (i=0; i<failed;i++) {
2385 dev = &sh->dev[failed_num[i]];
2386 if (test_bit(R5_ReadError, &dev->flags)
2387 && !test_bit(R5_LOCKED, &dev->flags)
2388 && test_bit(R5_UPTODATE, &dev->flags)
2390 if (!test_bit(R5_ReWrite, &dev->flags)) {
2391 set_bit(R5_Wantwrite, &dev->flags);
2392 set_bit(R5_ReWrite, &dev->flags);
2393 set_bit(R5_LOCKED, &dev->flags);
2395 /* let's read it back */
2396 set_bit(R5_Wantread, &dev->flags);
2397 set_bit(R5_LOCKED, &dev->flags);
2401 spin_unlock(&sh->lock);
2403 while ((bi=return_bi)) {
2404 int bytes = bi->bi_size;
2406 return_bi = bi->bi_next;
2407 bi->bi_next = NULL;
2408 bi->bi_size = 0;
2409 bi->bi_end_io(bi, bytes, 0);
2411 for (i=disks; i-- ;) {
2415 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
2417 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
2422 bi = &sh->dev[i].req;
2426 bi->bi_end_io = raid5_end_write_request;
2428 bi->bi_end_io = raid5_end_read_request;
2431 rdev = rcu_dereference(conf->disks[i].rdev);
2432 if (rdev && test_bit(Faulty, &rdev->flags))
2433 rdev = NULL;
2434 if (rdev)
2435 atomic_inc(&rdev->nr_pending);
2436 rcu_read_unlock();
2438 if (rdev) {
2439 if (syncing)
2440 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
2442 bi->bi_bdev = rdev->bdev;
2443 PRINTK("for %llu schedule op %ld on disc %d\n",
2444 (unsigned long long)sh->sector, bi->bi_rw, i);
2445 atomic_inc(&sh->count);
2446 bi->bi_sector = sh->sector + rdev->data_offset;
2447 bi->bi_flags = 1 << BIO_UPTODATE;
2449 bi->bi_max_vecs = 1;
2451 bi->bi_io_vec = &sh->dev[i].vec;
2452 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
2453 bi->bi_io_vec[0].bv_offset = 0;
2454 bi->bi_size = STRIPE_SIZE;
2457 test_bit(R5_ReWrite, &sh->dev[i].flags))
2458 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
2459 generic_make_request(bi);
2460 } else {
2461 if (rw == 1)
2462 set_bit(STRIPE_DEGRADED, &sh->state);
2463 PRINTK("skip op %ld on disc %d for sector %llu\n",
2464 bi->bi_rw, i, (unsigned long long)sh->sector);
2465 clear_bit(R5_LOCKED, &sh->dev[i].flags);
2466 set_bit(STRIPE_HANDLE, &sh->state);
2471 static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
2473 if (sh->raid_conf->level == 6)
2474 handle_stripe6(sh, tmp_page);
2475 else
2476 handle_stripe5(sh);
2477 }
2481 static void raid5_activate_delayed(raid5_conf_t *conf)
2483 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
2484 while (!list_empty(&conf->delayed_list)) {
2485 struct list_head *l = conf->delayed_list.next;
2486 struct stripe_head *sh;
2487 sh = list_entry(l, struct stripe_head, lru);
2489 clear_bit(STRIPE_DELAYED, &sh->state);
2490 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
2491 atomic_inc(&conf->preread_active_stripes);
2492 list_add_tail(&sh->lru, &conf->handle_list);
2497 static void activate_bit_delay(raid5_conf_t *conf)
2499 /* device_lock is held */
2500 struct list_head head;
2501 list_add(&head, &conf->bitmap_list);
2502 list_del_init(&conf->bitmap_list);
2503 while (!list_empty(&head)) {
2504 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
2505 list_del_init(&sh->lru);
2506 atomic_inc(&sh->count);
2507 __release_stripe(conf, sh);
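/* The open-coded list splice above is a common pattern. A minimal sketch of
 * the idea (illustrative only, not part of the driver):
 *
 *	struct list_head head;
 *	list_add(&head, &src);      puts 'head' in front of src's entries
 *	list_del_init(&src);        detaches 'src'; entries now hang off 'head'
 *	while (!list_empty(&head))
 *		...                 drain the private copy
 *
 * Taking a private copy lets each stripe be released without re-walking a
 * list that __release_stripe() may modify under the same device_lock.
 */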
2511 static void unplug_slaves(mddev_t *mddev)
2513 raid5_conf_t *conf = mddev_to_conf(mddev);
2517 for (i=0; i<mddev->raid_disks; i++) {
2518 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
2519 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
2520 request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
2522 atomic_inc(&rdev->nr_pending);
2523 rcu_read_unlock();
2525 if (r_queue->unplug_fn)
2526 r_queue->unplug_fn(r_queue);
2528 rdev_dec_pending(rdev, mddev);
2529 rcu_read_lock();
2535 static void raid5_unplug_device(request_queue_t *q)
2537 mddev_t *mddev = q->queuedata;
2538 raid5_conf_t *conf = mddev_to_conf(mddev);
2539 unsigned long flags;
2541 spin_lock_irqsave(&conf->device_lock, flags);
2543 if (blk_remove_plug(q)) {
2545 raid5_activate_delayed(conf);
2547 md_wakeup_thread(mddev->thread);
2549 spin_unlock_irqrestore(&conf->device_lock, flags);
2551 unplug_slaves(mddev);
2554 static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
2555 sector_t *error_sector)
2557 mddev_t *mddev = q->queuedata;
2558 raid5_conf_t *conf = mddev_to_conf(mddev);
2562 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
2563 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
2564 if (rdev && !test_bit(Faulty, &rdev->flags)) {
2565 struct block_device *bdev = rdev->bdev;
2566 request_queue_t *r_queue = bdev_get_queue(bdev);
2568 if (!r_queue->issue_flush_fn)
2569 ret = -EOPNOTSUPP;
2570 else {
2571 atomic_inc(&rdev->nr_pending);
2572 rcu_read_unlock();
2573 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
2574 error_sector);
2575 rdev_dec_pending(rdev, mddev);
2584 static int make_request(request_queue_t *q, struct bio * bi)
2586 mddev_t *mddev = q->queuedata;
2587 raid5_conf_t *conf = mddev_to_conf(mddev);
2588 unsigned int dd_idx, pd_idx;
2589 sector_t new_sector;
2590 sector_t logical_sector, last_sector;
2591 struct stripe_head *sh;
2592 const int rw = bio_data_dir(bi);
2595 if (unlikely(bio_barrier(bi))) {
2596 bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
2600 md_write_start(mddev, bi);
2602 disk_stat_inc(mddev->gendisk, ios[rw]);
2603 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
2605 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
2606 last_sector = bi->bi_sector + (bi->bi_size>>9);
2608 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
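/* A sketch of how the over-loaded counter behaves (illustrative only): it
 * starts at 1 so the bio cannot complete while it is still being split,
 * add_stripe_bio() increments it for every stripe the bio gets attached to,
 * and each completion path decrements it, calling bi_end_io() only when the
 * count falls back to zero:
 *
 *	bi->bi_phys_segments = 1;         setup reference
 *	...                               one increment per stripe added
 *	if (--bi->bi_phys_segments == 0)
 *		bi->bi_end_io(bi, bytes, 0);
 */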
2610 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
2612 int disks, data_disks;
2615 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
2616 if (likely(conf->expand_progress == MaxSector))
2617 disks = conf->raid_disks;
2619 /* spinlock is needed as expand_progress may be
2620 * 64bit on a 32bit platform, and so it might be
2621 * possible to see a half-updated value
2622 * Of course expand_progress could change after
2623 * the lock is dropped, so once we get a reference
2624 * to the stripe that we think it is, we will have
2625 * to check again.
2626 */
2627 spin_lock_irq(&conf->device_lock);
2628 disks = conf->raid_disks;
2629 if (logical_sector >= conf->expand_progress)
2630 disks = conf->previous_raid_disks;
2632 if (logical_sector >= conf->expand_lo) {
2633 spin_unlock_irq(&conf->device_lock);
2638 spin_unlock_irq(&conf->device_lock);
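/* Why the lock matters (illustrative sketch, not driver code): on a 32bit
 * machine a 64bit store is performed as two 32bit stores, so an unlocked
 * reader can observe a value that never existed:
 *
 *	u64 x = 0x00000000ffffffffULL;
 *	writer:  x = 0x0000000100000000ULL;   written as two 32bit halves
 *	reader:  tmp = x;                     may see 0x00000001ffffffff
 *
 * Holding device_lock around both the update and the read rules this out
 * for expand_progress.
 */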
2640 data_disks = disks - conf->max_degraded;
2642 new_sector = raid5_compute_sector(logical_sector, disks, data_disks,
2643 &dd_idx, &pd_idx, conf);
2644 PRINTK("raid5: make_request, sector %llu logical %llu\n",
2645 (unsigned long long)new_sector,
2646 (unsigned long long)logical_sector);
2648 sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
2650 if (unlikely(conf->expand_progress != MaxSector)) {
2651 /* expansion might have moved on while waiting for a
2652 * stripe, so we must do the range check again.
2653 * Expansion could still move past after this
2654 * test, but as we are holding a reference to
2655 * 'sh', we know that if that happens,
2656 * STRIPE_EXPANDING will get set and the expansion
2657 * won't proceed until we finish with the stripe.
2660 spin_lock_irq(&conf->device_lock);
2661 if (logical_sector < conf->expand_progress &&
2662 disks == conf->previous_raid_disks)
2663 /* mismatch, need to try again */
2665 spin_unlock_irq(&conf->device_lock);
2671 /* FIXME what if we get a false positive because these
2672 * are being updated.
2673 */
2674 if (logical_sector >= mddev->suspend_lo &&
2675 logical_sector < mddev->suspend_hi) {
2681 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
2682 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
2683 /* Stripe is busy expanding or
2684 * add failed due to overlap. Flush everything
2687 raid5_unplug_device(mddev->queue);
2692 finish_wait(&conf->wait_for_overlap, &w);
2693 handle_stripe(sh, NULL);
2696 /* cannot get stripe for read-ahead, just give-up */
2697 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2698 finish_wait(&conf->wait_for_overlap, &w);
2703 spin_lock_irq(&conf->device_lock);
2704 remaining = --bi->bi_phys_segments;
2705 spin_unlock_irq(&conf->device_lock);
2706 if (remaining == 0) {
2707 int bytes = bi->bi_size;
2709 if (rw == WRITE)
2710 md_write_end(mddev);
2711 bi->bi_size = 0;
2712 bi->bi_end_io(bi, bytes, 0);
2717 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
2719 /* reshaping is quite different from recovery/resync, so it is
2720 * handled quite separately ... here.
2722 * On each call to sync_request, we gather one chunk worth of
2723 * destination stripes and flag them as expanding.
2724 * Then we find all the source stripes and request reads.
2725 * As the reads complete, handle_stripe will copy the data
2726 * into the destination stripe and release that stripe.
2728 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2729 struct stripe_head *sh;
2731 sector_t first_sector, last_sector;
2736 sector_t writepos, safepos, gap;
2738 if (sector_nr == 0 &&
2739 conf->expand_progress != 0) {
2740 /* restarting in the middle, skip the initial sectors */
2741 sector_nr = conf->expand_progress;
2742 sector_div(sector_nr, conf->raid_disks-1);
2743 *skipped = 1;
2744 return sector_nr;
2745 }
2747 /* we update the metadata when there is more than 3Meg
2748 * in the block range (that is rather arbitrary; it should
2749 * probably be time based) or when the data about to be
2750 * copied would over-write the source of the data at
2751 * the front of the range.
2752 * i.e. the area one chunk beyond expand_progress in the new
2753 * layout must map to after where expand_lo maps in the old layout.
2754 */
2755 writepos = conf->expand_progress +
2756 conf->chunk_size/512*(conf->raid_disks-1);
2757 sector_div(writepos, conf->raid_disks-1);
2758 safepos = conf->expand_lo;
2759 sector_div(safepos, conf->previous_raid_disks-1);
2760 gap = conf->expand_progress - conf->expand_lo;
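/* Worked example (hypothetical numbers): with a 64KiB chunk (128 sectors)
 * and raid_disks == 5, each reshape step advances expand_progress by
 * 128*4 == 512 sectors and writepos sits one full new-geometry chunk ahead;
 * the superblock is updated whenever gap exceeds 4*3000*2 == 24000 sectors,
 * i.e. 3000KiB of progress per data disk, the '3Meg' in the test below.
 */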
2762 if (writepos >= safepos ||
2763 gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
2764 /* Cannot proceed until we've updated the superblock... */
2765 wait_event(conf->wait_for_overlap,
2766 atomic_read(&conf->reshape_stripes)==0);
2767 mddev->reshape_position = conf->expand_progress;
2768 mddev->sb_dirty = 1;
2769 md_wakeup_thread(mddev->thread);
2770 wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
2771 kthread_should_stop());
2772 spin_lock_irq(&conf->device_lock);
2773 conf->expand_lo = mddev->reshape_position;
2774 spin_unlock_irq(&conf->device_lock);
2775 wake_up(&conf->wait_for_overlap);
2778 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
2781 pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
2782 sh = get_active_stripe(conf, sector_nr+i,
2783 conf->raid_disks, pd_idx, 0);
2784 set_bit(STRIPE_EXPANDING, &sh->state);
2785 atomic_inc(&conf->reshape_stripes);
2786 /* If any of this stripe is beyond the end of the old
2787 * array, then we need to zero those blocks
2789 for (j=sh->disks; j--;) {
2791 if (j == sh->pd_idx)
2793 s = compute_blocknr(sh, j);
2794 if (s < (mddev->array_size<<1)) {
2798 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
2799 set_bit(R5_Expanded, &sh->dev[j].flags);
2800 set_bit(R5_UPTODATE, &sh->dev[j].flags);
2803 set_bit(STRIPE_EXPAND_READY, &sh->state);
2804 set_bit(STRIPE_HANDLE, &sh->state);
2808 spin_lock_irq(&conf->device_lock);
2809 conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
2810 spin_unlock_irq(&conf->device_lock);
2811 /* Ok, those stripes are ready. We can start scheduling
2812 * reads on the source stripes.
2813 * The source stripes are determined by mapping the first and last
2814 * block on the destination stripes.
2815 */
2816 raid_disks = conf->previous_raid_disks;
2817 data_disks = raid_disks - 1;
2819 raid5_compute_sector(sector_nr*(conf->raid_disks-1),
2820 raid_disks, data_disks,
2821 &dd_idx, &pd_idx, conf);
2823 raid5_compute_sector((sector_nr+conf->chunk_size/512)
2824 *(conf->raid_disks-1) -1,
2825 raid_disks, data_disks,
2826 &dd_idx, &pd_idx, conf);
2827 if (last_sector >= (mddev->size<<1))
2828 last_sector = (mddev->size<<1)-1;
2829 while (first_sector <= last_sector) {
2830 pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
2831 sh = get_active_stripe(conf, first_sector,
2832 conf->previous_raid_disks, pd_idx, 0);
2833 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2834 set_bit(STRIPE_HANDLE, &sh->state);
2836 first_sector += STRIPE_SECTORS;
2838 return conf->chunk_size>>9;
2841 /* FIXME go_faster isn't used */
2842 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
2844 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2845 struct stripe_head *sh;
2847 int raid_disks = conf->raid_disks;
2848 sector_t max_sector = mddev->size << 1;
2850 int still_degraded = 0;
2853 if (sector_nr >= max_sector) {
2854 /* just being told to finish up .. nothing much to do */
2855 unplug_slaves(mddev);
2856 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2861 if (mddev->curr_resync < max_sector) /* aborted */
2862 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2864 else /* completed sync */
2866 bitmap_close_sync(mddev->bitmap);
2871 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2872 return reshape_request(mddev, sector_nr, skipped);
2874 /* if there are too many failed drives and we are trying
2875 * to resync, then assert that we are finished, because there is
2876 * nothing we can do.
2877 */
2878 if (mddev->degraded >= conf->max_degraded &&
2879 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2880 sector_t rv = (mddev->size << 1) - sector_nr;
2884 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2885 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2886 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
2887 /* we can skip this block, and probably more */
2888 sync_blocks /= STRIPE_SECTORS;
2890 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
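/* Rounding note (hypothetical numbers, assuming 4K pages so STRIPE_SECTORS
 * is 8): if the bitmap reports that 1003 sectors can be skipped, the
 * division and multiplication above round this down to 125 * 8 == 1000, so
 * the skip always covers whole stripes.
 */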
2893 pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
2894 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
2896 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
2897 /* make sure we don't swamp the stripe cache if someone else
2898 * is trying to get access
2900 schedule_timeout_uninterruptible(1);
2902 /* Need to check if array will still be degraded after recovery/resync
2903 * We don't need to check the 'failed' flag as when that gets set,
2904 * recovery aborts.
2905 */
2906 for (i=0; i<mddev->raid_disks; i++)
2907 if (conf->disks[i].rdev == NULL)
2910 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
2912 spin_lock(&sh->lock);
2913 set_bit(STRIPE_SYNCING, &sh->state);
2914 clear_bit(STRIPE_INSYNC, &sh->state);
2915 spin_unlock(&sh->lock);
2917 handle_stripe(sh, NULL);
2920 return STRIPE_SECTORS;
2923 /*
2924 * This is our raid5 kernel thread.
2925 *
2926 * We scan the hash table for stripes which can be handled now.
2927 * During the scan, completed stripes are saved for us by the interrupt
2928 * handler, so that they will not have to wait for our next wakeup.
2929 */
2930 static void raid5d (mddev_t *mddev)
2932 struct stripe_head *sh;
2933 raid5_conf_t *conf = mddev_to_conf(mddev);
2936 PRINTK("+++ raid5d active\n");
2938 md_check_recovery(mddev);
2941 spin_lock_irq(&conf->device_lock);
2943 struct list_head *first;
2945 if (conf->seq_flush != conf->seq_write) {
2946 int seq = conf->seq_flush;
2947 spin_unlock_irq(&conf->device_lock);
2948 bitmap_unplug(mddev->bitmap);
2949 spin_lock_irq(&conf->device_lock);
2950 conf->seq_write = seq;
2951 activate_bit_delay(conf);
2954 if (list_empty(&conf->handle_list) &&
2955 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
2956 !blk_queue_plugged(mddev->queue) &&
2957 !list_empty(&conf->delayed_list))
2958 raid5_activate_delayed(conf);
2960 if (list_empty(&conf->handle_list))
2963 first = conf->handle_list.next;
2964 sh = list_entry(first, struct stripe_head, lru);
2966 list_del_init(first);
2967 atomic_inc(&sh->count);
2968 BUG_ON(atomic_read(&sh->count) != 1);
2969 spin_unlock_irq(&conf->device_lock);
2972 handle_stripe(sh, conf->spare_page);
2975 spin_lock_irq(&conf->device_lock);
2977 PRINTK("%d stripes handled\n", handled);
2979 spin_unlock_irq(&conf->device_lock);
2981 unplug_slaves(mddev);
2983 PRINTK("--- raid5d inactive\n");
2987 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
2989 raid5_conf_t *conf = mddev_to_conf(mddev);
2991 return sprintf(page, "%d\n", conf->max_nr_stripes);
2997 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
2999 raid5_conf_t *conf = mddev_to_conf(mddev);
3002 if (len >= PAGE_SIZE)
3007 new = simple_strtoul(page, &end, 10);
3008 if (!*page || (*end && *end != '\n') )
3010 if (new <= 16 || new > 32768)
3012 while (new < conf->max_nr_stripes) {
3013 if (drop_one_stripe(conf))
3014 conf->max_nr_stripes--;
3018 while (new > conf->max_nr_stripes) {
3019 if (grow_one_stripe(conf))
3020 conf->max_nr_stripes++;
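/* Typical usage from user-space (illustrative; 'md0' is a placeholder):
 *
 *	# cat /sys/block/md0/md/stripe_cache_size
 *	256
 *	# echo 1024 > /sys/block/md0/md/stripe_cache_size
 *
 * The store rejects values outside (16, 32768]. Each stripe_head pins
 * raid_disks pages, so raising this trades memory for a larger working set
 * of active stripes.
 */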
3026 static struct md_sysfs_entry
3027 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
3028 raid5_show_stripe_cache_size,
3029 raid5_store_stripe_cache_size);
3032 stripe_cache_active_show(mddev_t *mddev, char *page)
3034 raid5_conf_t *conf = mddev_to_conf(mddev);
3036 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
3041 static struct md_sysfs_entry
3042 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
3044 static struct attribute *raid5_attrs[] = {
3045 &raid5_stripecache_size.attr,
3046 &raid5_stripecache_active.attr,
3049 static struct attribute_group raid5_attrs_group = {
3051 .attrs = raid5_attrs,
3054 static int run(mddev_t *mddev)
3057 int raid_disk, memory;
3059 struct disk_info *disk;
3060 struct list_head *tmp;
3062 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) {
3063 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
3064 mdname(mddev), mddev->level);
3068 if (mddev->reshape_position != MaxSector) {
3069 /* Check that we can continue the reshape.
3070 * Currently only the number of disks can change; it must
3071 * increase, and we must be past the point where
3072 * a stripe over-writes itself.
3073 */
3074 sector_t here_new, here_old;
3077 if (mddev->new_level != mddev->level ||
3078 mddev->new_layout != mddev->layout ||
3079 mddev->new_chunk != mddev->chunk_size) {
3080 printk(KERN_ERR "raid5: %s: unsupported reshape required - aborting.\n",
3084 if (mddev->delta_disks <= 0) {
3085 printk(KERN_ERR "raid5: %s: unsupported reshape (reduce disks) required - aborting.\n",
3089 old_disks = mddev->raid_disks - mddev->delta_disks;
3090 /* reshape_position must be on a new-stripe boundary, and one
3091 * further up in new geometry must map after here in old geometry.
3092 */
3093 here_new = mddev->reshape_position;
3094 if (sector_div(here_new, (mddev->chunk_size>>9)*(mddev->raid_disks-1))) {
3095 printk(KERN_ERR "raid5: reshape_position not on a stripe boundary\n");
3098 /* here_new is the stripe we will write to */
3099 here_old = mddev->reshape_position;
3100 sector_div(here_old, (mddev->chunk_size>>9)*(old_disks-1));
3101 /* here_old is the first stripe that we might need to read from */
3102 if (here_new >= here_old) {
3103 /* Reading from the same stripe as writing to - bad */
3104 printk(KERN_ERR "raid5: reshape_position too early for auto-recovery - aborting.\n");
3107 printk(KERN_INFO "raid5: reshape will continue\n");
3108 /* OK, we should be able to continue; */
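/* Worked example (hypothetical numbers): growing 4 -> 5 disks with a 64KiB
 * chunk (128 sectors), a reshape_position of 3072 sectors gives
 * here_new = 3072 / (128*4) == 6 and here_old = 3072 / (128*3) == 8; since
 * 6 < 8 the stripe being written precedes any stripe still to be read, so
 * the reshape can safely continue.
 */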
3112 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
3113 if ((conf = mddev->private) == NULL)
3115 if (mddev->reshape_position == MaxSector) {
3116 conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks;
3118 conf->raid_disks = mddev->raid_disks;
3119 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
3122 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
3127 conf->mddev = mddev;
3129 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
3132 if (mddev->level == 6) {
3133 conf->spare_page = alloc_page(GFP_KERNEL);
3134 if (!conf->spare_page)
3137 spin_lock_init(&conf->device_lock);
3138 init_waitqueue_head(&conf->wait_for_stripe);
3139 init_waitqueue_head(&conf->wait_for_overlap);
3140 INIT_LIST_HEAD(&conf->handle_list);
3141 INIT_LIST_HEAD(&conf->delayed_list);
3142 INIT_LIST_HEAD(&conf->bitmap_list);
3143 INIT_LIST_HEAD(&conf->inactive_list);
3144 atomic_set(&conf->active_stripes, 0);
3145 atomic_set(&conf->preread_active_stripes, 0);
3147 PRINTK("raid5: run(%s) called.\n", mdname(mddev));
3149 ITERATE_RDEV(mddev,rdev,tmp) {
3150 raid_disk = rdev->raid_disk;
3151 if (raid_disk >= conf->raid_disks
3154 disk = conf->disks + raid_disk;
3158 if (test_bit(In_sync, &rdev->flags)) {
3159 char b[BDEVNAME_SIZE];
3160 printk(KERN_INFO "raid5: device %s operational as raid"
3161 " disk %d\n", bdevname(rdev->bdev,b),
3163 conf->working_disks++;
3167 /*
3168 * 0 for a fully functional array, 1 or 2 for a degraded array.
3169 */
3170 mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
3171 conf->mddev = mddev;
3172 conf->chunk_size = mddev->chunk_size;
3173 conf->level = mddev->level;
3174 if (conf->level == 6)
3175 conf->max_degraded = 2;
3177 conf->max_degraded = 1;
3178 conf->algorithm = mddev->layout;
3179 conf->max_nr_stripes = NR_STRIPES;
3180 conf->expand_progress = mddev->reshape_position;
3182 /* device size must be a multiple of chunk size */
3183 mddev->size &= ~(mddev->chunk_size/1024 -1);
3184 mddev->resync_max_sectors = mddev->size << 1;
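/* e.g. (hypothetical numbers) with a 64KiB chunk the mask is ~63, so a
 * device of 1000003KiB is trimmed to 1000000KiB and resync_max_sectors
 * becomes 2000000 sectors.
 */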
3186 if (conf->level == 6 && conf->raid_disks < 4) {
3187 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
3188 mdname(mddev), conf->raid_disks);
3191 if (!conf->chunk_size || conf->chunk_size % 4) {
3192 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
3193 conf->chunk_size, mdname(mddev));
3196 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
3198 "raid5: unsupported parity algorithm %d for %s\n",
3199 conf->algorithm, mdname(mddev));
3202 if (mddev->degraded > conf->max_degraded) {
3203 printk(KERN_ERR "raid5: not enough operational devices for %s"
3204 " (%d/%d failed)\n",
3205 mdname(mddev), conf->failed_disks, conf->raid_disks);
3209 if (mddev->degraded > 0 &&
3210 mddev->recovery_cp != MaxSector) {
3211 if (mddev->ok_start_degraded)
3213 "raid5: starting dirty degraded array: %s"
3214 " - data corruption possible.\n",
3218 "raid5: cannot start dirty degraded array for %s\n",
3225 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
3226 if (!mddev->thread) {
3228 "raid5: couldn't allocate thread for %s\n",
3233 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
3234 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
3235 if (grow_stripes(conf, conf->max_nr_stripes)) {
3237 "raid5: couldn't allocate %dkB for buffers\n", memory);
3238 shrink_stripes(conf);
3239 md_unregister_thread(mddev->thread);
3242 printk(KERN_INFO "raid5: allocated %dkB for %s\n",
3243 memory, mdname(mddev));
3245 if (mddev->degraded == 0)
3246 printk(KERN_INFO "raid5: raid level %d set %s active with %d out of %d"
3247 " devices, algorithm %d\n", conf->level, mdname(mddev),
3248 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
3251 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
3252 " out of %d devices, algorithm %d\n", conf->level,
3253 mdname(mddev), mddev->raid_disks - mddev->degraded,
3254 mddev->raid_disks, conf->algorithm);
3256 print_raid5_conf(conf);
3258 if (conf->expand_progress != MaxSector) {
3259 printk("...ok start reshape thread\n");
3260 conf->expand_lo = conf->expand_progress;
3261 atomic_set(&conf->reshape_stripes, 0);
3262 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3263 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3264 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3265 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3266 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3270 /* read-ahead size must cover two whole stripes, which is
3271 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data devices
3272 */
3274 int data_disks = conf->previous_raid_disks - conf->max_degraded;
3275 int stripe = data_disks *
3276 (mddev->chunk_size / PAGE_SIZE);
3277 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3278 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
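/* e.g. (hypothetical numbers) 4 data disks with a 64KiB chunk and 4K pages
 * give stripe = 4 * 16 == 64 pages, so ra_pages is raised to at least 128
 * pages (512KiB), i.e. two full stripes.
 */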
3281 /* Ok, everything is just fine now */
3282 sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
3284 mddev->queue->unplug_fn = raid5_unplug_device;
3285 mddev->queue->issue_flush_fn = raid5_issue_flush;
3286 mddev->array_size = mddev->size * (conf->previous_raid_disks -
3287 conf->max_degraded);
3292 print_raid5_conf(conf);
3293 safe_put_page(conf->spare_page);
3295 kfree(conf->stripe_hashtbl);
3298 mddev->private = NULL;
3299 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
3305 static int stop(mddev_t *mddev)
3307 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3309 md_unregister_thread(mddev->thread);
3310 mddev->thread = NULL;
3311 shrink_stripes(conf);
3312 kfree(conf->stripe_hashtbl);
3313 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
3314 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
3317 mddev->private = NULL;
3322 static void print_sh (struct seq_file *seq, struct stripe_head *sh)
3326 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
3327 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
3328 seq_printf(seq, "sh %llu, count %d.\n",
3329 (unsigned long long)sh->sector, atomic_read(&sh->count));
3330 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
3331 for (i = 0; i < sh->disks; i++) {
3332 seq_printf(seq, "(cache%d: %p %ld) ",
3333 i, sh->dev[i].page, sh->dev[i].flags);
3335 seq_printf(seq, "\n");
3338 static void printall (struct seq_file *seq, raid5_conf_t *conf)
3340 struct stripe_head *sh;
3341 struct hlist_node *hn;
3344 spin_lock_irq(&conf->device_lock);
3345 for (i = 0; i < NR_HASH; i++) {
3346 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
3347 if (sh->raid_conf != conf)
3352 spin_unlock_irq(&conf->device_lock);
3356 static void status (struct seq_file *seq, mddev_t *mddev)
3358 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3361 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
3362 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
3363 for (i = 0; i < conf->raid_disks; i++)
3364 seq_printf (seq, "%s",
3365 conf->disks[i].rdev &&
3366 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
3367 seq_printf (seq, "]");
3369 seq_printf (seq, "\n");
3370 printall(seq, conf);
3374 static void print_raid5_conf (raid5_conf_t *conf)
3377 struct disk_info *tmp;
3379 printk("RAID5 conf printout:\n");
3381 printk("(conf==NULL)\n");
3384 printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
3385 conf->working_disks, conf->failed_disks);
3387 for (i = 0; i < conf->raid_disks; i++) {
3388 char b[BDEVNAME_SIZE];
3389 tmp = conf->disks + i;
3391 printk(" disk %d, o:%d, dev:%s\n",
3392 i, !test_bit(Faulty, &tmp->rdev->flags),
3393 bdevname(tmp->rdev->bdev,b));
3397 static int raid5_spare_active(mddev_t *mddev)
3400 raid5_conf_t *conf = mddev->private;
3401 struct disk_info *tmp;
3403 for (i = 0; i < conf->raid_disks; i++) {
3404 tmp = conf->disks + i;
3406 && !test_bit(Faulty, &tmp->rdev->flags)
3407 && !test_bit(In_sync, &tmp->rdev->flags)) {
3409 conf->failed_disks--;
3410 conf->working_disks++;
3411 set_bit(In_sync, &tmp->rdev->flags);
3414 print_raid5_conf(conf);
3418 static int raid5_remove_disk(mddev_t *mddev, int number)
3420 raid5_conf_t *conf = mddev->private;
3423 struct disk_info *p = conf->disks + number;
3425 print_raid5_conf(conf);
3428 if (test_bit(In_sync, &rdev->flags) ||
3429 atomic_read(&rdev->nr_pending)) {
3435 if (atomic_read(&rdev->nr_pending)) {
3436 /* lost the race, try later */
3443 print_raid5_conf(conf);
3447 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
3449 raid5_conf_t *conf = mddev->private;
3452 struct disk_info *p;
3454 if (mddev->degraded > conf->max_degraded)
3455 /* no point adding a device */
3459 * find the disk ... but prefer rdev->saved_raid_disk
3462 if (rdev->saved_raid_disk >= 0 &&
3463 conf->disks[rdev->saved_raid_disk].rdev == NULL)
3464 disk = rdev->saved_raid_disk;
3467 for ( ; disk < conf->raid_disks; disk++)
3468 if ((p=conf->disks + disk)->rdev == NULL) {
3469 clear_bit(In_sync, &rdev->flags);
3470 rdev->raid_disk = disk;
3472 if (rdev->saved_raid_disk != disk)
3474 rcu_assign_pointer(p->rdev, rdev);
3477 print_raid5_conf(conf);
3481 static int raid5_resize(mddev_t *mddev, sector_t sectors)
3483 /* no resync is happening, and there is enough space
3484 * on all devices, so we can resize.
3485 * We need to make sure resync covers any new space.
3486 * If the array is shrinking we should possibly wait until
3487 * any io in the removed space completes, but it hardly seems
3488 * worth it.
3489 */
3490 raid5_conf_t *conf = mddev_to_conf(mddev);
3492 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
3493 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1;
3494 set_capacity(mddev->gendisk, mddev->array_size << 1);
3496 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
3497 mddev->recovery_cp = mddev->size << 1;
3498 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3500 mddev->size = sectors /2;
3501 mddev->resync_max_sectors = sectors;
3505 #ifdef CONFIG_MD_RAID5_RESHAPE
3506 static int raid5_check_reshape(mddev_t *mddev)
3508 raid5_conf_t *conf = mddev_to_conf(mddev);
3511 if (mddev->delta_disks < 0 ||
3512 mddev->new_level != mddev->level)
3513 return -EINVAL; /* Cannot shrink array or change level yet */
3514 if (mddev->delta_disks == 0)
3515 return 0; /* nothing to do */
3517 /* Can only proceed if there are plenty of stripe_heads.
3518 * We need a minimum of one full stripe, and for sensible progress
3519 * it is best to have about 4 times that.
3520 * If we require 4 times, then the default 256 4K stripe_heads will
3521 * allow for chunk sizes up to 256K, which is probably OK.
3522 * If the chunk size is greater, user-space should request more
3523 * stripe_heads first.
3524 */
3525 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
3526 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
3527 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
3528 (mddev->chunk_size / STRIPE_SIZE)*4);
3532 err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
3536 /* looks like we might be able to manage this */
3540 static int raid5_start_reshape(mddev_t *mddev)
3542 raid5_conf_t *conf = mddev_to_conf(mddev);
3544 struct list_head *rtmp;
3546 int added_devices = 0;
3548 if (mddev->degraded ||
3549 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3552 ITERATE_RDEV(mddev, rdev, rtmp)
3553 if (rdev->raid_disk < 0 &&
3554 !test_bit(Faulty, &rdev->flags))
3557 if (spares < mddev->delta_disks-1)
3558 /* Not enough devices even to make a degraded array
3559 * of that size
3560 */
3563 atomic_set(&conf->reshape_stripes, 0);
3564 spin_lock_irq(&conf->device_lock);
3565 conf->previous_raid_disks = conf->raid_disks;
3566 conf->raid_disks += mddev->delta_disks;
3567 conf->expand_progress = 0;
3568 conf->expand_lo = 0;
3569 spin_unlock_irq(&conf->device_lock);
3571 /* Add some new drives, as many as will fit.
3572 * We know there are enough to make the newly sized array work.
3573 */
3574 ITERATE_RDEV(mddev, rdev, rtmp)
3575 if (rdev->raid_disk < 0 &&
3576 !test_bit(Faulty, &rdev->flags)) {
3577 if (raid5_add_disk(mddev, rdev)) {
3579 set_bit(In_sync, &rdev->flags);
3580 conf->working_disks++;
3582 rdev->recovery_offset = 0;
3583 sprintf(nm, "rd%d", rdev->raid_disk);
3584 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
3589 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices;
3590 mddev->raid_disks = conf->raid_disks;
3591 mddev->reshape_position = 0;
3592 mddev->sb_dirty = 1;
3594 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3595 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3596 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3597 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3598 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3600 if (!mddev->sync_thread) {
3601 mddev->recovery = 0;
3602 spin_lock_irq(&conf->device_lock);
3603 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
3604 conf->expand_progress = MaxSector;
3605 spin_unlock_irq(&conf->device_lock);
3608 md_wakeup_thread(mddev->sync_thread);
3609 md_new_event(mddev);
3614 static void end_reshape(raid5_conf_t *conf)
3616 struct block_device *bdev;
3618 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
3619 conf->mddev->array_size = conf->mddev->size * (conf->raid_disks-1);
3620 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
3621 conf->mddev->changed = 1;
3623 bdev = bdget_disk(conf->mddev->gendisk, 0);
3625 mutex_lock(&bdev->bd_inode->i_mutex);
3626 i_size_write(bdev->bd_inode, conf->mddev->array_size << 10);
3627 mutex_unlock(&bdev->bd_inode->i_mutex);
3630 spin_lock_irq(&conf->device_lock);
3631 conf->expand_progress = MaxSector;
3632 spin_unlock_irq(&conf->device_lock);
3633 conf->mddev->reshape_position = MaxSector;
3635 /* read-ahead size must cover two whole stripes, which is
3636 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data devices
3637 */
3639 int data_disks = conf->previous_raid_disks - conf->max_degraded;
3640 int stripe = data_disks *
3641 (conf->mddev->chunk_size / PAGE_SIZE);
3642 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3643 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
3648 static void raid5_quiesce(mddev_t *mddev, int state)
3650 raid5_conf_t *conf = mddev_to_conf(mddev);
3653 case 2: /* resume for a suspend */
3654 wake_up(&conf->wait_for_overlap);
3657 case 1: /* stop all writes */
3658 spin_lock_irq(&conf->device_lock);
3660 wait_event_lock_irq(conf->wait_for_stripe,
3661 atomic_read(&conf->active_stripes) == 0,
3662 conf->device_lock, /* nothing */);
3663 spin_unlock_irq(&conf->device_lock);
3666 case 0: /* re-enable writes */
3667 spin_lock_irq(&conf->device_lock);
3669 wake_up(&conf->wait_for_stripe);
3670 wake_up(&conf->wait_for_overlap);
3671 spin_unlock_irq(&conf->device_lock);
3676 static struct mdk_personality raid6_personality =
3680 .owner = THIS_MODULE,
3681 .make_request = make_request,
3685 .error_handler = error,
3686 .hot_add_disk = raid5_add_disk,
3687 .hot_remove_disk= raid5_remove_disk,
3688 .spare_active = raid5_spare_active,
3689 .sync_request = sync_request,
3690 .resize = raid5_resize,
3691 .quiesce = raid5_quiesce,
3693 static struct mdk_personality raid5_personality =
3697 .owner = THIS_MODULE,
3698 .make_request = make_request,
3702 .error_handler = error,
3703 .hot_add_disk = raid5_add_disk,
3704 .hot_remove_disk= raid5_remove_disk,
3705 .spare_active = raid5_spare_active,
3706 .sync_request = sync_request,
3707 .resize = raid5_resize,
3708 #ifdef CONFIG_MD_RAID5_RESHAPE
3709 .check_reshape = raid5_check_reshape,
3710 .start_reshape = raid5_start_reshape,
3712 .quiesce = raid5_quiesce,
3715 static struct mdk_personality raid4_personality =
3719 .owner = THIS_MODULE,
3720 .make_request = make_request,
3724 .error_handler = error,
3725 .hot_add_disk = raid5_add_disk,
3726 .hot_remove_disk= raid5_remove_disk,
3727 .spare_active = raid5_spare_active,
3728 .sync_request = sync_request,
3729 .resize = raid5_resize,
3730 .quiesce = raid5_quiesce,
3733 static int __init raid5_init(void)
3737 e = raid6_select_algo();
3738 if (e)
3739 return e;
3740 register_md_personality(&raid6_personality);
3741 register_md_personality(&raid5_personality);
3742 register_md_personality(&raid4_personality);
3746 static void raid5_exit(void)
3748 unregister_md_personality(&raid6_personality);
3749 unregister_md_personality(&raid5_personality);
3750 unregister_md_personality(&raid4_personality);
3753 module_init(raid5_init);
3754 module_exit(raid5_exit);
3755 MODULE_LICENSE("GPL");
3756 MODULE_ALIAS("md-personality-4"); /* RAID5 */
3757 MODULE_ALIAS("md-raid5");
3758 MODULE_ALIAS("md-raid4");
3759 MODULE_ALIAS("md-level-5");
3760 MODULE_ALIAS("md-level-4");
3761 MODULE_ALIAS("md-personality-8"); /* RAID6 */
3762 MODULE_ALIAS("md-raid6");
3763 MODULE_ALIAS("md-level-6");
3765 /* This used to be two separate modules, they were: */
3766 MODULE_ALIAS("raid5");
3767 MODULE_ALIAS("raid6");