/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/config.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>

#include <linux/init.h>

#include <linux/file.h>

#include <linux/kmod.h>

#include <asm/unaligned.h>
#define MAJOR_NR MD_MAJOR

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))
static void autostart_arrays(int part);

static mdk_personality_t *pers[MAX_PERSONALITY];
static DEFINE_SPINLOCK(pers_lock);
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * irq load.
 *
 * You can change both limits via /proc/sys/dev/raid/speed_limit_min and _max.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
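/*
 * Illustrative example (not part of the driver): once the sysctl table
 * below is registered, the limits above can be tuned at run time from
 * user space, e.g.
 *
 *	echo 5000   > /proc/sys/dev/raid/speed_limit_min
 *	echo 100000 > /proc/sys/dev/raid/speed_limit_max
 */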
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};
static struct block_device_operations md_fops;

static int start_readonly;

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still owning
 * a reference to the current mddev must mddev_put it.
 */
#define ITERATE_MDDEV(mddev,tmp)					\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
		mddev_get(list_entry(tmp, mddev_t, all_mddevs));	\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
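/*
 * Illustrative usage sketch (not part of the original source): a walker
 * over all arrays relies on the locking/refcount rules above, e.g.
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev,tmp)
 *		printk("%s\n", mdname(mddev));
 *
 * md_print_devices() below uses exactly this pattern.
 */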
static int md_fail_request(request_queue_t *q, struct bio *bio)
{
	bio_io_error(bio, bio->bi_size);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		blk_put_queue(mddev->queue);
		kobject_unregister(&mddev->kobj);
	}
	spin_unlock(&all_mddevs_lock);
}
static mddev_t *mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			kfree(new);
			return mddev;
		}

	if (new) {
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	memset(new, 0, sizeof(*new));

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	init_MUTEX(&new->reconfig_sem);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);

	new->queue = blk_alloc_queue(GFP_KERNEL);
	if (!new->queue) {
		kfree(new);
		return NULL;
	}

	blk_queue_make_request(new->queue, md_fail_request);

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return down_interruptible(&mddev->reconfig_sem);
}

static inline void mddev_lock_uninterruptible(mddev_t * mddev)
{
	down(&mddev->reconfig_sem);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return down_trylock(&mddev->reconfig_sem);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	up(&mddev->reconfig_sem);

	md_wakeup_thread(mddev->thread);
}
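/*
 * Illustrative usage sketch (not part of the original source): every
 * reconfiguration path brackets its work with this pair, e.g.
 *
 *	if (mddev_lock(mddev) == 0) {
 *		... modify mddev ...
 *		mddev_unlock(mddev);
 *	}
 */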
mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->desc_nr == nr)
			return rdev;
	}
	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->bdev->bd_dev == dev)
			return rdev;
	}
	return NULL;
}

static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	return MD_NEW_SIZE_BLOCKS(size);
}
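/*
 * Worked example (illustrative, assuming the usual 64K reservation in
 * md_p.h, i.e. MD_RESERVED_BLOCKS == 64 with 1K blocks): for a device
 * of 10000 blocks, MD_NEW_SIZE_BLOCKS(10000) = (10000 & ~63) - 64 =
 * 9920, so the 0.90 superblock starts 64K-aligned, inside the last
 * 64..128K of the device.
 */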
static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t size;

	size = rdev->sb_offset;

	if (chunk_size)
		size &= ~((sector_t)chunk_size/1024 - 1);
	return size;
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -EINVAL;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		page_cache_release(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_offset = 0;
		rdev->size = 0;
	}
}
static int super_written(struct bio *bio, unsigned int bytes_done, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (bio->bi_size)
		return 1;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
		md_error(mddev, rdev);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
	return 0;
}

static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
		return 0;
	}
	bio_put(bio2);
	bio->bi_private = rdev;
	return super_written(bio, bytes_done, error);
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
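/*
 * Illustrative caller sketch (not part of the original source): write
 * one superblock page and wait for it to reach stable storage:
 *
 *	md_super_write(mddev, rdev, rdev->sb_offset<<1,
 *		       rdev->sb_size, rdev->sb_page);
 *	md_super_wait(mddev);
 *
 * md_update_sb() below follows this pattern for every rdev.
 */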
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	complete((struct completion*)bio->bi_private);
	return 0;
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNC);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	if (	(sb1->set_uuid0 == sb2->set_uuid0) &&
		(sb1->set_uuid1 == sb2->set_uuid1) &&
		(sb1->set_uuid2 == sb2->set_uuid2) &&
		(sb1->set_uuid3 == sb2->set_uuid3))
		return 1;
	return 0;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
		ret = 0;
	else
		ret = 1;

abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
	sb->sb_csum = disk_csum;
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *      -EINVAL superblock incompatible or invalid
 *      -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *      The first time, mddev->raid_disks will be 0, and data from
 *      dev should be merged in. Subsequent calls check that dev
 *      is new enough. Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 */

struct super_type {
	char		*name;
	struct module	*owner;
	int		(*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
	int		(*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		(*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};
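/*
 * Illustrative sketch (not part of the original source): callers never
 * use a handler directly; they dispatch through the super_types[] table
 * defined further down, indexed by the array's major version, e.g.
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 */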
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;
	sector_t sb_offset;

	/*
	 * Calculate the position of the superblock,
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	sb_offset = calc_dev_sboffset(rdev->bdev);
	rdev->sb_offset = sb_offset;

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret)
		return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version != 90) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}

	rdev->size = calc_dev_size(rdev, sb->chunk_size);

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->persistent = ! sb->not_persistent;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->size = sb->size;
		mddev->events = md_event(sb);
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL) {
			if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6) {
				/* FIXME use a better test */
				printk(KERN_WARNING "md: bitmaps only supported for raid1/5/6\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = mddev->default_bitmap_offset;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		__u64 ev1 = md_event(sb);
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		__u64 ev1 = md_event(sb);
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else /* just a hot-add of a new device, leave raid_disk at -1 */
		return 0;

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) &&
			 desc->raid_disk < mddev->raid_disks) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;
	int i;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->minor_version = mddev->minor_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size  = mddev->size;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = !mddev->persistent;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	ITERATE_RDEV(mddev,rdev2,tmp) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags)) {
			d->state = (1<<MD_DISK_FAULTY);
			failed++;
		} else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * version 1 superblock
 */

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	unsigned int disk_csum, csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	unsigned int *isuper = (unsigned int*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(unsigned short*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
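/*
 * Worked example (illustrative): the 64-bit accumulator is folded into
 * 32 bits by adding its two halves, so newcsum == 0x123456789 yields
 * csum = 0x23456789 + 0x1 = 0x2345678a.
 */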
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_offset;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_offset = rdev->bdev->bd_inode->i_size >> 9;
		sb_offset -= 8*2;
		sb_offset &= ~(sector_t)(4*2-1);
		/* convert from sectors to K */
		sb_offset /= 2;
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_offset = sb_offset;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret)
		return ret;

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask)+1;

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_offset;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
	return ret;
}
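/*
 * Worked example (illustrative) for minor_version 0 above: on a device
 * of 1000003 sectors, sb_offset becomes ((1000003 - 16) & ~7) / 2 =
 * 499992K, i.e. sector 999984, which is 4K-aligned and 19 sectors
 * (between 8K and 12K) from the end of the device.
 */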
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->persistent = 1;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = le64_to_cpu(sb->events);
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL ) {
			if (mddev->level != 1) {
				printk(KERN_WARNING "md: bitmaps only supported for raid1\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
		}
	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		__u64 ev1 = le64_to_cpu(sb->events);
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		__u64 ev1 = le64_to_cpu(sb->events);
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else /* just a hot-add of a new device, leave raid_disk at -1 */
		return 0;

	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		rdev->desc_nr = le32_to_cpu(sb->dev_number);
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	max_dev = 0;
	ITERATE_RDEV(mddev,rdev2,tmp)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	ITERATE_RDEV(mddev,rdev2,tmp) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
	sb->sb_csum = calc_sb_1_csum(sb);
}
static struct super_type super_types[] = {
	[0] = {
		.name		= "0.90.0",
		.owner		= THIS_MODULE,
		.load_super	= super_90_load,
		.validate_super	= super_90_validate,
		.sync_super	= super_90_sync,
	},
	[1] = {
		.name		= "md-1",
		.owner		= THIS_MODULE,
		.load_super	= super_1_load,
		.validate_super	= super_1_validate,
		.sync_super	= super_1_sync,
	},
};
static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp)
		if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
			return rdev;

	return NULL;
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev1,rdev,tmp)
		if (match_dev_unit(mddev2, rdev))
			return 1;

	return 0;
}

static LIST_HEAD(pending_raid_disks);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	mdk_rdev_t *same_pdev;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}
	same_pdev = match_dev_unit(mddev, rdev);
	if (same_pdev)
		printk(KERN_WARNING
			"%s: WARNING: %s appears to be on the same physical"
			" disk as %s.\n"
			"True protection against single-disk"
			" failure might be compromised.\n",
			mdname(mddev), bdevname(rdev->bdev,b),
			bdevname(same_pdev->bdev,b2));

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
		return -ENOMEM;

	list_add(&rdev->same_set, &mddev->disks);
	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	rdev->kobj.parent = &mddev->kobj;
	kobject_add(&rdev->kobj);

	sysfs_create_link(&rdev->kobj, &rdev->bdev->bd_disk->kobj, "block");
	return 0;
}
static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	list_del_init(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	kobject_del(&rdev->kobj);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev);
		return err;
	}
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
	list_del_init(&rdev->same_set);
#ifndef MODULE
	md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}
static void print_rdev(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock:\n");
		print_sb((mdp_super_t*)page_address(rdev->sb_page));
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
void md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	ITERATE_MDDEV(mddev,tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		ITERATE_RDEV(mddev,rdev,tmp2)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		ITERATE_RDEV(mddev,rdev,tmp2)
			print_rdev(rdev);
	}
	printk("md: **********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		super_types[mddev->major_version].
			sync_super(mddev, rdev);
		rdev->sb_loaded = 1;
	}
}
static void md_update_sb(mddev_t * mddev)
{
	int err;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;

repeat:
	spin_lock_irq(&mddev->write_lock);
	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();
	mddev->events ++;

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}
	mddev->sb_dirty = 2;
	sync_sbs(mddev);

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		mddev->sb_dirty = 0;
		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	err = bitmap_update_sb(mddev->bitmap);
	ITERATE_RDEV(mddev,rdev,tmp) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);
		} else
			dprintk(")\n");

		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, sb_dirty was set to 1, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	mddev->sb_dirty = 0;
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
}
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};

static ssize_t
rdev_show_state(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	int len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len += sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}

static struct rdev_sysfs_entry rdev_state = {
	.attr = {.name = "state", .mode = S_IRUGO },
	.show = rdev_show_state,
};
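/*
 * Illustrative example (not part of the original source): with the rdev
 * kobject registered under <disk>/md (see bind_rdev_to_array), the state
 * shows up in sysfs, e.g.
 *
 *	$ cat /sys/block/md0/md/dev-sda1/state
 *	in_sync
 */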
static ssize_t
rdev_show_super(mdk_rdev_t *rdev, char *page)
{
	if (rdev->sb_loaded && rdev->sb_size) {
		memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
		return rdev->sb_size;
	} else
		return 0;
}
static struct rdev_sysfs_entry rdev_super = {
	.attr = {.name = "super", .mode = S_IRUGO },
	.show = rdev_show_super,
};
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_super.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(rdev, page);
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);

	if (!entry->store)
		return -EIO;
	return entry->store(rdev, page, length);
}

static void rdev_free(struct kobject *ko)
{
	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
	kfree(rdev);
}
static struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}
	memset(rdev, 0, sizeof(*rdev));

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev);
	if (err)
		goto abort_free;

	rdev->kobj.parent = NULL;
	rdev->kobj.ktype = &rdev_ktype;
	kobject_init(&rdev->kobj);

	rdev->desc_nr = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s has invalid sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
	INIT_LIST_HEAD(&rdev->same_set);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */

static void analyze_sbs(mddev_t * mddev)
{
	int i;
	struct list_head *tmp;
	mdk_rdev_t *rdev, *freshest;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	ITERATE_RDEV(mddev,rdev,tmp)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk(KERN_ERR
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		}
	}

	if (mddev->recovery_cp != MaxSector &&
	    mddev->level >= 1)
		printk(KERN_ERR "md: %s: raid array is not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
}
static ssize_t
md_show_level(mddev_t *mddev, char *page)
{
	mdk_personality_t *p = mddev->pers;
	if (p == NULL && mddev->raid_disks == 0)
		return 0;
	if (mddev->level >= 0)
		return sprintf(page, "RAID-%d\n", mddev->level);
	else
		return sprintf(page, "%s\n", p->name);
}

static struct md_sysfs_entry md_level = {
	.attr = {.name = "level", .mode = S_IRUGO },
	.show = md_show_level,
};

static ssize_t
md_show_rdisks(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static struct md_sysfs_entry md_raid_disks = {
	.attr = {.name = "raid_disks", .mode = S_IRUGO },
	.show = md_show_rdisks,
};
static ssize_t
md_show_scan(mddev_t *mddev, char *page)
{
	char *type = "none";
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}
static ssize_t
md_store_scan(mddev_t *mddev, const char *page, size_t len)
{
	int canscan = 0;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	down(&mddev->reconfig_sem);
	if (mddev->pers && mddev->pers->sync_request)
		canscan = 1;
	up(&mddev->reconfig_sem);
	if (!canscan)
		return -EINVAL;

	if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0)
		set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0)
		return -EINVAL;
	set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return len;
}
static ssize_t
md_show_mismatch(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry md_scan_mode = {
	.attr = {.name = "scan_mode", .mode = S_IRUGO|S_IWUSR },
	.show = md_show_scan,
	.store = md_store_scan,
};

static struct md_sysfs_entry md_mismatches = {
	.attr = {.name = "mismatch_cnt", .mode = S_IRUGO },
	.show = md_show_mismatch,
};
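/*
 * Illustrative example (not part of the original source): with these
 * attributes registered, a consistency check can be driven from user
 * space, e.g.
 *
 *	echo check > /sys/block/md0/md/scan_mode
 *	cat /sys/block/md0/md/mismatch_cnt
 */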
static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_raid_disks.attr,
	&md_scan_mode.attr,
	&md_mismatches.attr,
	NULL,
};

static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(mddev, page);
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);

	if (!entry->store)
		return -EIO;
	return entry->store(mddev, page, length);
}

static void md_free(struct kobject *ko)
{
	mddev_t *mddev = container_of(ko, mddev_t, kobj);
	kfree(mddev);
}

static struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};
int mdp_major = 0;

static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	static DECLARE_MUTEX(disks_sem);
	mddev_t *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned = (MAJOR(dev) != MD_MAJOR);
	int shift = partitioned ? MdpMinorShift : 0;
	int unit = MINOR(dev) >> shift;

	if (!mddev)
		return NULL;

	down(&disks_sem);
	if (mddev->gendisk) {
		up(&disks_sem);
		mddev_put(mddev);
		return NULL;
	}
	disk = alloc_disk(1 << shift);
	if (!disk) {
		up(&disks_sem);
		mddev_put(mddev);
		return NULL;
	}
	disk->major = MAJOR(dev);
	disk->first_minor = unit << shift;
	if (partitioned) {
		sprintf(disk->disk_name, "md_d%d", unit);
		sprintf(disk->devfs_name, "md/d%d", unit);
	} else {
		sprintf(disk->disk_name, "md%d", unit);
		sprintf(disk->devfs_name, "md/%d", unit);
	}
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	add_disk(disk);
	mddev->gendisk = disk;
	up(&disks_sem);
	mddev->kobj.parent = &disk->kobj;
	mddev->kobj.k_name = NULL;
	snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
	mddev->kobj.ktype = &md_ktype;
	kobject_register(&mddev->kobj);
	return NULL;
}
void md_wakeup_thread(mdk_thread_t *thread);

static void md_safemode_timeout(unsigned long data)
{
	mddev_t *mddev = (mddev_t *) data;

	mddev->safemode = 1;
	md_wakeup_thread(mddev->thread);
}
static int do_md_run(mddev_t * mddev)
{
	int pnum, err;
	int chunk_size;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	struct gendisk *disk;
	char b[BDEVNAME_SIZE];

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks)
		analyze_sbs(mddev);

	chunk_size = mddev->chunk_size;
	pnum = level_to_pers(mddev->level);

	if ((pnum != MULTIPATH) && (pnum != RAID1)) {
		if (!chunk_size) {
			/*
			 * 'default chunksize' in the old md code used to
			 * be PAGE_SIZE, baaad.
			 * we abort here to be on the safe side. We don't
			 * want to continue the bad practice.
			 */
			printk(KERN_ERR
				"no chunksize specified, see 'man raidtab'\n");
			return -EINVAL;
		}
		if (chunk_size > MAX_CHUNK_SIZE) {
			printk(KERN_ERR "too big chunk_size: %d > %d\n",
				chunk_size, MAX_CHUNK_SIZE);
			return -EINVAL;
		}
		/*
		 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
		 */
		if ( (1 << ffz(~chunk_size)) != chunk_size) {
			printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
			return -EINVAL;
		}
		if (chunk_size < PAGE_SIZE) {
			printk(KERN_ERR "too small chunk_size: %d < %ld\n",
				chunk_size, PAGE_SIZE);
			return -EINVAL;
		}

		/* devices must have minimum size of one chunk */
		ITERATE_RDEV(mddev,rdev,tmp) {
			if (test_bit(Faulty, &rdev->flags))
				continue;
			if (rdev->size < chunk_size / 1024) {
				printk(KERN_WARNING
					"md: Dev %s smaller than chunk_size:"
					" %lluk < %dk\n",
					bdevname(rdev->bdev,b),
					(unsigned long long)rdev->size,
					chunk_size / 1024);
				return -EINVAL;
			}
		}
	}

	if (!pers[pnum])
		request_module("md-personality-%d", pnum);

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 * Also find largest hardsector size
	 */
	ITERATE_RDEV(mddev,rdev,tmp) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev, 0);
	}

	md_probe(mddev->unit, NULL, NULL);
	disk = mddev->gendisk;
	if (!disk)
		return -ENOMEM;

	spin_lock(&pers_lock);
	if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
		spin_unlock(&pers_lock);
		printk(KERN_WARNING "md: personality %d is not loaded!\n",
		       pnum);
		return -EINVAL;
	}

	mddev->pers = pers[pnum];
	spin_unlock(&pers_lock);

	mddev->recovery = 0;
	mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
	mddev->barriers_work = 1;

	if (start_readonly)
		mddev->ro = 2; /* read-only, but switch on first write */

	/* before we start the array running, initialise the bitmap */
	err = bitmap_create(mddev);
	if (err)
		printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
			mdname(mddev), err);
	else
		err = mddev->pers->run(mddev);
	if (err) {
		printk(KERN_ERR "md: pers->run() failed ...\n");
		module_put(mddev->pers->owner);
		mddev->pers = NULL;
		bitmap_destroy(mddev);
		return err;
	}
	atomic_set(&mddev->writes_pending,0);
	mddev->safemode = 0;
	mddev->safemode_timer.function = md_safemode_timeout;
	mddev->safemode_timer.data = (unsigned long) mddev;
	mddev->safemode_delay = (20 * HZ)/1000 + 1; /* 20 msec delay */

	ITERATE_RDEV(mddev,rdev,tmp)
		if (rdev->raid_disk >= 0) {
			char nm[20];
			sprintf(nm, "rd%d", rdev->raid_disk);
			sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
		}

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	if (mddev->sb_dirty)
		md_update_sb(mddev);

	set_capacity(disk, mddev->array_size<<1);

	/* If we call blk_queue_make_request here, it will
	 * re-initialise max_sectors etc which may have been
	 * refined inside -> run. So just set the bits we need to set.
	 * Most initialisation happened when we called
	 * blk_queue_make_request(..., md_fail_request)
	 * earlier.
	 */
	mddev->queue->queuedata = mddev;
	mddev->queue->make_request_fn = mddev->pers->make_request;

	return 0;
}
static int restart_array(mddev_t *mddev)
{
	struct gendisk *disk = mddev->gendisk;
	int err;

	/*
	 * Complain if it has no devices
	 */
	err = -ENXIO;
	if (list_empty(&mddev->disks))
		goto out;

	if (mddev->pers) {
		err = -EBUSY;
		if (!mddev->ro)
			goto out;

		mddev->safemode = 0;
		mddev->ro = 0;
		set_disk_ro(disk, 0);

		printk(KERN_INFO "md: %s switched to read-write mode.\n",
			mdname(mddev));
		/*
		 * Kick recovery or resync if necessary
		 */
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		err = 0;
	} else {
		printk(KERN_ERR "md: %s has no personality assigned.\n",
			mdname(mddev));
		err = -EINVAL;
	}

out:
	return err;
}
static int do_md_stop(mddev_t * mddev, int ro)
{
	int err = 0;
	struct gendisk *disk = mddev->gendisk;

	if (mddev->pers) {
		if (atomic_read(&mddev->active)>2) {
			printk("md: %s still in use.\n",mdname(mddev));
			return -EBUSY;
		}

		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
		}

		del_timer_sync(&mddev->safemode_timer);

		invalidate_partition(disk, 0);

		if (ro) {
			err = -ENXIO;
			if (mddev->ro == 1)
				goto out;
			mddev->ro = 1;
		} else {
			bitmap_flush(mddev);
			md_super_wait(mddev);
			if (mddev->ro)
				set_disk_ro(disk, 0);
			blk_queue_make_request(mddev->queue, md_fail_request);
			mddev->pers->stop(mddev);
			module_put(mddev->pers->owner);
			mddev->pers = NULL;
			if (mddev->ro)
				mddev->ro = 0;
		}
		if (!mddev->in_sync) {
			/* mark array as shutdown cleanly */
			mddev->in_sync = 1;
			md_update_sb(mddev);
		}
		if (ro)
			set_disk_ro(disk, 1);
	}

	bitmap_destroy(mddev);
	if (mddev->bitmap_file) {
		atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
		fput(mddev->bitmap_file);
		mddev->bitmap_file = NULL;
	}
	mddev->bitmap_offset = 0;

	/*
	 * Free resources if final stop
	 */
	if (!ro) {
		mdk_rdev_t *rdev;
		struct list_head *tmp;
		struct gendisk *disk;
		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		ITERATE_RDEV(mddev,rdev,tmp)
			if (rdev->raid_disk >= 0) {
				char nm[20];
				sprintf(nm, "rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
			}

		export_array(mddev);

		mddev->array_size = 0;
		disk = mddev->gendisk;
		if (disk)
			set_capacity(disk, 0);
	} else
		printk(KERN_INFO "md: %s switched to read-only mode.\n",
			mdname(mddev));
	err = 0;
out:
	return err;
}
static void autorun_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;
	int err;

	if (list_empty(&mddev->disks))
		return;

	printk(KERN_INFO "md: running: ");

	ITERATE_RDEV(mddev,rdev,tmp) {
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

	err = do_md_run (mddev);
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		do_md_stop (mddev, 0);
	}
}
/*
 * let's try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	struct list_head candidates;
	struct list_head *tmp;
	mdk_rdev_t *rdev0, *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		dev_t dev;
		rdev0 = list_entry(pending_raid_disks.next,
					 mdk_rdev_t, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
			bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		ITERATE_RDEV_PENDING(rdev,tmp)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md:  adding %s ...\n",
					bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		if (part)
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
		else
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev) {
			printk(KERN_ERR
				"md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
				"md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
			export_rdev(rdev);
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
/*
 * import RAID devices based on one partition
 * if possible, the array gets run as well.
 */

static int autostart_array(dev_t startdev)
{
	char b[BDEVNAME_SIZE];
	int err = -EINVAL, i;
	mdp_super_t *sb = NULL;
	mdk_rdev_t *start_rdev = NULL, *rdev;

	start_rdev = md_import_device(startdev, 0, 0);
	if (IS_ERR(start_rdev))
		return err;


	/* NOTE: this can only work for 0.90.0 superblocks */
	sb = (mdp_super_t*)page_address(start_rdev->sb_page);
	if (sb->major_version != 0 ||
	    sb->minor_version != 90 ) {
		printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
		export_rdev(start_rdev);
		return err;
	}

	if (test_bit(Faulty, &start_rdev->flags)) {
		printk(KERN_WARNING
			"md: can not autostart based on faulty %s!\n",
			bdevname(start_rdev->bdev,b));
		export_rdev(start_rdev);
		return err;
	}
	list_add(&start_rdev->same_set, &pending_raid_disks);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc = sb->disks + i;
		dev_t dev = MKDEV(desc->major, desc->minor);

		if (!dev)
			continue;
		if (dev == startdev)
			continue;
		if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
			continue;
		rdev = md_import_device(dev, 0, 0);
		if (IS_ERR(rdev))
			continue;

		list_add(&rdev->same_set, &pending_raid_disks);
	}

	/*
	 * possibly return codes
	 */
	autorun_devices(0);
	return 0;
}
static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}
static int get_array_info(mddev_t * mddev, void __user * arg)
{
	mdu_array_info_t info;
	int nr,working,active,failed,spare;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	nr=working=active=failed=spare=0;
	ITERATE_RDEV(mddev,rdev,tmp) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else {
			working++;
			if (test_bit(In_sync, &rdev->flags))
				active++;
			else
				spare++;
		}
	}

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime         = mddev->ctime;
	info.level         = mddev->level;
	info.size          = mddev->size;
	info.nr_disks      = nr;
	info.raid_disks    = mddev->raid_disks;
	info.md_minor      = mddev->md_minor;
	info.not_persistent= !mddev->persistent;

	info.utime         = mddev->utime;
	info.state         = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
	if (mddev->bitmap && mddev->bitmap_offset)
		info.state |= (1<<MD_SB_BITMAP_PRESENT);
	info.active_disks  = active;
	info.working_disks = working;
	info.failed_disks  = failed;
	info.spare_disks   = spare;

	info.layout        = mddev->layout;
	info.chunk_size    = mddev->chunk_size;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int get_bitmap_file(mddev_t * mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	file = kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
	if (!mddev->bitmap || !mddev->bitmap->file) {
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

	ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
	if (!ptr)
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}
static int get_disk_info(mddev_t * mddev, void __user * arg)
{
	mdu_disk_info_t info;
	unsigned int nr;
	mdk_rdev_t *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	nr = info.number;

	rdev = find_rdev_nr(mddev, nr);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
2523 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2525 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
2527 dev_t dev = MKDEV(info->major,info->minor);
2529 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
2532 if (!mddev->raid_disks) {
2534 /* expecting a device which has a superblock */
2535 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
2538 "md: md_import_device returned %ld\n",
2540 return PTR_ERR(rdev);
2542 if (!list_empty(&mddev->disks)) {
2543 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2544 mdk_rdev_t, same_set);
2545 int err = super_types[mddev->major_version]
2546 .load_super(rdev, rdev0, mddev->minor_version);
2549 "md: %s has different UUID to %s\n",
2550 bdevname(rdev->bdev,b),
2551 bdevname(rdev0->bdev,b2));
2556 err = bind_rdev_to_array(rdev, mddev);
2563 * add_new_disk can be used once the array is assembled
2564 * to add "hot spares". They must already have a superblock
2569 if (!mddev->pers->hot_add_disk) {
2571 "%s: personality does not support diskops!\n",
2575 if (mddev->persistent)
2576 rdev = md_import_device(dev, mddev->major_version,
2577 mddev->minor_version);
2579 rdev = md_import_device(dev, -1, -1);
2582 "md: md_import_device returned %ld\n",
2584 return PTR_ERR(rdev);
2586 /* set save_raid_disk if appropriate */
2587 if (!mddev->persistent) {
2588 if (info->state & (1<<MD_DISK_SYNC) &&
2589 info->raid_disk < mddev->raid_disks)
2590 rdev->raid_disk = info->raid_disk;
2592 rdev->raid_disk = -1;
2594 super_types[mddev->major_version].
2595 validate_super(mddev, rdev);
2596 rdev->saved_raid_disk = rdev->raid_disk;
2598 clear_bit(In_sync, &rdev->flags); /* just to be sure */
2599 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
2600 set_bit(WriteMostly, &rdev->flags);
2602 rdev->raid_disk = -1;
2603 err = bind_rdev_to_array(rdev, mddev);
2607 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2608 md_wakeup_thread(mddev->thread);
2612 /* otherwise, add_new_disk is only allowed
2613 * for major_version==0 superblocks
2615 if (mddev->major_version != 0) {
2616 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
2621 if (!(info->state & (1<<MD_DISK_FAULTY))) {
2623 rdev = md_import_device (dev, -1, 0);
2626 "md: error, md_import_device() returned %ld\n",
2628 return PTR_ERR(rdev);
2630 rdev->desc_nr = info->number;
2631 if (info->raid_disk < mddev->raid_disks)
2632 rdev->raid_disk = info->raid_disk;
2634 rdev->raid_disk = -1;
2638 if (rdev->raid_disk < mddev->raid_disks)
2639 if (info->state & (1<<MD_DISK_SYNC))
2640 set_bit(In_sync, &rdev->flags);
2642 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
2643 set_bit(WriteMostly, &rdev->flags);
2645 err = bind_rdev_to_array(rdev, mddev);
2651 if (!mddev->persistent) {
2652 printk(KERN_INFO "md: nonpersistent superblock ...\n");
2653 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2655 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2656 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
2658 if (!mddev->size || (mddev->size > rdev->size))
2659 mddev->size = rdev->size;
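/*
 * Illustrative userspace sketch (an assumption, mirroring how a tool
 * such as mdadm drives this path): hot-adding a spare to a running
 * array.  8:16 is assumed to stand for /dev/sdb; md_fd is an open file
 * descriptor on the array device.
 *
 *	mdu_disk_info_t info = {
 *		.major = 8,
 *		.minor = 16,
 *		.raid_disk = -1,
 *	};
 *
 *	if (ioctl(md_fd, ADD_NEW_DISK, &info) < 0)
 *		perror("ADD_NEW_DISK");
 */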
2665 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
2667 char b[BDEVNAME_SIZE];
2673 rdev = find_rdev(mddev, dev);
2677 if (rdev->raid_disk >= 0)
2680 kick_rdev_from_array(rdev);
2681 md_update_sb(mddev);
2685 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
2686 bdevname(rdev->bdev,b), mdname(mddev));
2690 static int hot_add_disk(mddev_t * mddev, dev_t dev)
2692 char b[BDEVNAME_SIZE];
2700 if (mddev->major_version != 0) {
2701 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
2702 " version-0 superblocks.\n",
2706 if (!mddev->pers->hot_add_disk) {
2708 "%s: personality does not support diskops!\n",
2713 rdev = md_import_device (dev, -1, 0);
2716 "md: error, md_import_device() returned %ld\n",
2721 if (mddev->persistent)
2722 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2725 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2727 size = calc_dev_size(rdev, mddev->chunk_size);
2730 if (size < mddev->size) {
2732 "%s: disk size %llu blocks < array size %llu\n",
2733 mdname(mddev), (unsigned long long)size,
2734 (unsigned long long)mddev->size);
2739 if (test_bit(Faulty, &rdev->flags)) {
2741 "md: can not hot-add faulty %s disk to %s!\n",
2742 bdevname(rdev->bdev,b), mdname(mddev));
2746 clear_bit(In_sync, &rdev->flags);
2748 bind_rdev_to_array(rdev, mddev);
2751 * The rest had better be atomic: we can have disk failures
2752 * noticed in interrupt contexts ...
2755 if (rdev->desc_nr == mddev->max_disks) {
2756 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
2759 goto abort_unbind_export;
2762 rdev->raid_disk = -1;
2764 md_update_sb(mddev);
2767 * Kick recovery, maybe this spare has to be added to the
2768 * array immediately.
2770 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2771 md_wakeup_thread(mddev->thread);
2775 abort_unbind_export:
2776 unbind_rdev_from_array(rdev);
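/*
 * Illustrative userspace sketch (an assumption): the older, v0.90-only
 * HOT_ADD_DISK/HOT_REMOVE_DISK calls pass a plain device number as the
 * ioctl argument rather than a descriptor structure.
 *
 *	#include <sys/sysmacros.h>
 *
 *	ioctl(md_fd, HOT_ADD_DISK, (unsigned long)makedev(8, 16));
 *	ioctl(md_fd, HOT_REMOVE_DISK, (unsigned long)makedev(8, 16));
 *
 * HOT_REMOVE_DISK only succeeds for a device that is not an active
 * array member (raid_disk < 0), as hot_remove_disk() enforces above.
 */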
2783 /* similar to deny_write_access, but accounts for our holding a reference
2784 * to the file ourselves */
2785 static int deny_bitmap_write_access(struct file * file)
2787 struct inode *inode = file->f_mapping->host;
2789 spin_lock(&inode->i_lock);
2790 if (atomic_read(&inode->i_writecount) > 1) {
2791 spin_unlock(&inode->i_lock);
2794 atomic_set(&inode->i_writecount, -1);
2795 spin_unlock(&inode->i_lock);
2800 static int set_bitmap_file(mddev_t *mddev, int fd)
2805 if (!mddev->pers->quiesce)
2807 if (mddev->recovery || mddev->sync_thread)
2809 /* we should be able to change the bitmap... */
2815 return -EEXIST; /* cannot add when bitmap is present */
2816 mddev->bitmap_file = fget(fd);
2818 if (mddev->bitmap_file == NULL) {
2819 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
2824 err = deny_bitmap_write_access(mddev->bitmap_file);
2826 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
2828 fput(mddev->bitmap_file);
2829 mddev->bitmap_file = NULL;
2832 mddev->bitmap_offset = 0; /* file overrides offset */
2833 } else if (mddev->bitmap == NULL)
2834 return -ENOENT; /* cannot remove what isn't there */
2837 mddev->pers->quiesce(mddev, 1);
2839 err = bitmap_create(mddev);
2841 bitmap_destroy(mddev);
2842 mddev->pers->quiesce(mddev, 0);
2843 } else if (fd < 0) {
2844 if (mddev->bitmap_file)
2845 fput(mddev->bitmap_file);
2846 mddev->bitmap_file = NULL;
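/*
 * Illustrative userspace sketch (an assumption; the bitmap path is
 * hypothetical): attaching an external bitmap file to an array whose
 * personality supports quiesce.  The file descriptor itself is the
 * ioctl argument; a negative fd removes a file-backed bitmap again.
 *
 *	int bfd = open("/var/md0-bitmap", O_RDWR);
 *
 *	if (bfd < 0 || ioctl(md_fd, SET_BITMAP_FILE, bfd) < 0)
 *		perror("SET_BITMAP_FILE");
 *	...
 *	ioctl(md_fd, SET_BITMAP_FILE, -1);
 */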
2853 * set_array_info is used two different ways
2854 * The original usage is when creating a new array.
2855 * In this usage, raid_disks is > 0 and it together with
2856 level, size, not_persistent, layout and chunk_size determine the
2857 * shape of the array.
2858 * This will always create an array with a type-0.90.0 superblock.
2859 * The newer usage is when assembling an array.
2860 * In this case raid_disks will be 0, and the major_version field is
2861 used to determine which style of super-block is to be found on the devices.
2862 The minor_version and patch_version numbers are also kept in case the
2863 * super_block handler wishes to interpret them.
2865 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2868 if (info->raid_disks == 0) {
2869 /* just setting version number for superblock loading */
2870 if (info->major_version < 0 ||
2871 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
2872 super_types[info->major_version].name == NULL) {
2873 /* maybe try to auto-load a module? */
2875 "md: superblock version %d not known\n",
2876 info->major_version);
2879 mddev->major_version = info->major_version;
2880 mddev->minor_version = info->minor_version;
2881 mddev->patch_version = info->patch_version;
2884 mddev->major_version = MD_MAJOR_VERSION;
2885 mddev->minor_version = MD_MINOR_VERSION;
2886 mddev->patch_version = MD_PATCHLEVEL_VERSION;
2887 mddev->ctime = get_seconds();
2889 mddev->level = info->level;
2890 mddev->size = info->size;
2891 mddev->raid_disks = info->raid_disks;
2892 /* don't set md_minor, it is determined by which /dev/md* was
2895 if (info->state & (1<<MD_SB_CLEAN))
2896 mddev->recovery_cp = MaxSector;
2898 mddev->recovery_cp = 0;
2899 mddev->persistent = ! info->not_persistent;
2901 mddev->layout = info->layout;
2902 mddev->chunk_size = info->chunk_size;
2904 mddev->max_disks = MD_SB_DISKS;
2906 mddev->sb_dirty = 1;
2909 * Generate a 128 bit UUID
2911 get_random_bytes(mddev->uuid, 16);
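/*
 * Illustrative userspace sketch (an assumption) of the two usages
 * described above.  Creating a fresh 2-disk RAID1 (always a v0.90
 * superblock; fields left unset default to zero):
 *
 *	mdu_array_info_t create = {
 *		.level = 1,
 *		.raid_disks = 2,
 *	};
 *	ioctl(md_fd, SET_ARRAY_INFO, &create);
 *
 * Assembling from existing superblocks instead, where raid_disks == 0
 * and only the version fields matter:
 *
 *	mdu_array_info_t assemble = { .major_version = 1 };
 *	ioctl(md_fd, SET_ARRAY_INFO, &assemble);
 */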
2917 * update_array_info is used to change the configuration of an
2919 * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
2920 * fields in the info are checked against the array.
2921 * Any differences that cannot be handled will cause an error.
2922 * Normally, only one change can be managed at a time.
2924 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
2930 /* calculate expected state, ignoring low bits */
2931 if (mddev->bitmap && mddev->bitmap_offset)
2932 state |= (1 << MD_SB_BITMAP_PRESENT);
2934 if (mddev->major_version != info->major_version ||
2935 mddev->minor_version != info->minor_version ||
2936 /* mddev->patch_version != info->patch_version || */
2937 mddev->ctime != info->ctime ||
2938 mddev->level != info->level ||
2939 /* mddev->layout != info->layout || */
2940 !mddev->persistent != info->not_persistent||
2941 mddev->chunk_size != info->chunk_size ||
2942 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
2943 ((state^info->state) & 0xfffffe00)
2946 /* Check there is only one change */
2947 if (mddev->size != info->size) cnt++;
2948 if (mddev->raid_disks != info->raid_disks) cnt++;
2949 if (mddev->layout != info->layout) cnt++;
2950 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
2951 if (cnt == 0) return 0;
2952 if (cnt > 1) return -EINVAL;
2954 if (mddev->layout != info->layout) {
2956 * we don't need to do anything at the md level, the
2957 * personality will take care of it all.
2959 if (mddev->pers->reconfig == NULL)
2962 return mddev->pers->reconfig(mddev, info->layout, -1);
2964 if (mddev->size != info->size) {
2966 struct list_head *tmp;
2967 if (mddev->pers->resize == NULL)
2969 /* The "size" is the amount of each device that is used.
2970 * This can only make sense for arrays with redundancy.
2971 * linear and raid0 always use whatever space is available.
2972 * We can only consider changing the size if no resync
2973 * or reconstruction is happening, and if the new size
2974 * is acceptable. It must fit before the sb_offset or,
2975 * if that is < data_offset, it must fit before the
2976 * size of each device.
2977 * If size is zero, we find the largest size that fits.
2979 if (mddev->sync_thread)
2981 ITERATE_RDEV(mddev,rdev,tmp) {
2983 int fit = (info->size == 0);
2984 if (rdev->sb_offset > rdev->data_offset)
2985 avail = (rdev->sb_offset*2) - rdev->data_offset;
2987 avail = get_capacity(rdev->bdev->bd_disk)
2988 - rdev->data_offset;
2989 if (fit && (info->size == 0 || info->size > avail/2))
2990 info->size = avail/2;
2991 if (avail < ((sector_t)info->size << 1))
2994 rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
2996 struct block_device *bdev;
2998 bdev = bdget_disk(mddev->gendisk, 0);
3000 down(&bdev->bd_inode->i_sem);
3001 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3002 up(&bdev->bd_inode->i_sem);
3007 if (mddev->raid_disks != info->raid_disks) {
3008 /* change the number of raid disks */
3009 if (mddev->pers->reshape == NULL)
3011 if (info->raid_disks <= 0 ||
3012 info->raid_disks >= mddev->max_disks)
3014 if (mddev->sync_thread)
3016 rv = mddev->pers->reshape(mddev, info->raid_disks);
3018 struct block_device *bdev;
3020 bdev = bdget_disk(mddev->gendisk, 0);
3022 down(&bdev->bd_inode->i_sem);
3023 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3024 up(&bdev->bd_inode->i_sem);
3029 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
3030 if (mddev->pers->quiesce == NULL)
3032 if (mddev->recovery || mddev->sync_thread)
3034 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
3035 /* add the bitmap */
3038 if (mddev->default_bitmap_offset == 0)
3040 mddev->bitmap_offset = mddev->default_bitmap_offset;
3041 mddev->pers->quiesce(mddev, 1);
3042 rv = bitmap_create(mddev);
3044 bitmap_destroy(mddev);
3045 mddev->pers->quiesce(mddev, 0);
3047 /* remove the bitmap */
3050 if (mddev->bitmap->file)
3052 mddev->pers->quiesce(mddev, 1);
3053 bitmap_destroy(mddev);
3054 mddev->pers->quiesce(mddev, 0);
3055 mddev->bitmap_offset = 0;
3058 md_update_sb(mddev);
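/*
 * Illustrative userspace sketch (an assumption): since only one change
 * is accepted per call, a tool re-reads the current configuration and
 * alters a single field.  A size of 0 asks for the largest size that
 * fits, as implemented above.
 *
 *	mdu_array_info_t info;
 *
 *	if (ioctl(md_fd, GET_ARRAY_INFO, &info) == 0) {
 *		info.size = 0;
 *		ioctl(md_fd, SET_ARRAY_INFO, &info);
 *	}
 */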
3062 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
3066 if (mddev->pers == NULL)
3069 rdev = find_rdev(mddev, dev);
3073 md_error(mddev, rdev);
3077 static int md_ioctl(struct inode *inode, struct file *file,
3078 unsigned int cmd, unsigned long arg)
3081 void __user *argp = (void __user *)arg;
3082 struct hd_geometry __user *loc = argp;
3083 mddev_t *mddev = NULL;
3085 if (!capable(CAP_SYS_ADMIN))
3089 * Commands dealing with the RAID driver but not any
3095 err = get_version(argp);
3098 case PRINT_RAID_DEBUG:
3106 autostart_arrays(arg);
3113 * Commands creating/starting a new array:
3116 mddev = inode->i_bdev->bd_disk->private_data;
3124 if (cmd == START_ARRAY) {
3125 /* START_ARRAY doesn't need to lock the array as autostart_array
3126 * does the locking, and it could even be a different array
3131 "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
3132 "This will not be supported beyond 2.6\n",
3133 current->comm, current->pid);
3136 err = autostart_array(new_decode_dev(arg));
3138 printk(KERN_WARNING "md: autostart failed!\n");
3144 err = mddev_lock(mddev);
3147 "md: ioctl lock interrupted, reason %d, cmd %d\n",
3154 case SET_ARRAY_INFO:
3156 mdu_array_info_t info;
3158 memset(&info, 0, sizeof(info));
3159 else if (copy_from_user(&info, argp, sizeof(info))) {
3164 err = update_array_info(mddev, &info);
3166 printk(KERN_WARNING "md: couldn't update"
3167 " array info. %d\n", err);
3172 if (!list_empty(&mddev->disks)) {
3174 "md: array %s already has disks!\n",
3179 if (mddev->raid_disks) {
3181 "md: array %s already initialised!\n",
3186 err = set_array_info(mddev, &info);
3188 printk(KERN_WARNING "md: couldn't set"
3189 " array info. %d\n", err);
3199 * Commands querying/configuring an existing array:
3201 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
3202 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
3203 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
3204 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
3210 * Commands even a read-only array can execute:
3214 case GET_ARRAY_INFO:
3215 err = get_array_info(mddev, argp);
3218 case GET_BITMAP_FILE:
3219 err = get_bitmap_file(mddev, argp);
3223 err = get_disk_info(mddev, argp);
3226 case RESTART_ARRAY_RW:
3227 err = restart_array(mddev);
3231 err = do_md_stop (mddev, 0);
3235 err = do_md_stop (mddev, 1);
3239 * We have a problem here: there is no easy way to give a CHS
3240 * virtual geometry. We currently pretend that we have a 2 head,
3241 * 4 sector geometry (with a BIG number of cylinders...). This drives
3242 * dosfs just mad... ;-)
3249 err = put_user (2, (char __user *) &loc->heads);
3252 err = put_user (4, (char __user *) &loc->sectors);
3255 err = put_user(get_capacity(mddev->gendisk)/8,
3256 (short __user *) &loc->cylinders);
3259 err = put_user (get_start_sect(inode->i_bdev),
3260 (long __user *) &loc->start);
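/*
 * Worked example of the fake geometry above: with 2 heads and 4 sectors
 * per track, one cylinder is 2 * 4 = 8 sectors, so a 1 GiB array
 * (2097152 sectors) reports 2097152 / 8 = 262144 cylinders.
 */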
3265 * The remaining ioctls are changing the state of the
3266 * superblock, so we do not allow them on read-only arrays.
3267 * However non-MD ioctls (e.g. get-size) will still come through
3268 * here and hit the 'default' below, so only disallow
3269 * 'md' ioctls, and switch to rw mode if started auto-readonly.
3271 if (_IOC_TYPE(cmd) == MD_MAJOR &&
3272 mddev->ro && mddev->pers) {
3273 if (mddev->ro == 2) {
3275 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3276 md_wakeup_thread(mddev->thread);
3288 mdu_disk_info_t info;
3289 if (copy_from_user(&info, argp, sizeof(info)))
3292 err = add_new_disk(mddev, &info);
3296 case HOT_REMOVE_DISK:
3297 err = hot_remove_disk(mddev, new_decode_dev(arg));
3301 err = hot_add_disk(mddev, new_decode_dev(arg));
3304 case SET_DISK_FAULTY:
3305 err = set_disk_faulty(mddev, new_decode_dev(arg));
3309 err = do_md_run (mddev);
3312 case SET_BITMAP_FILE:
3313 err = set_bitmap_file(mddev, (int)arg);
3317 if (_IOC_TYPE(cmd) == MD_MAJOR)
3318 printk(KERN_WARNING "md: %s(pid %d) used"
3319 " obsolete MD ioctl, upgrade your"
3320 " software to use new ictls.\n",
3321 current->comm, current->pid);
3328 mddev_unlock(mddev);
3338 static int md_open(struct inode *inode, struct file *file)
3341 * Succeed if we can lock the mddev, which confirms that
3342 * it isn't being stopped right now.
3344 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3347 if ((err = mddev_lock(mddev)))
3352 mddev_unlock(mddev);
3354 check_disk_change(inode->i_bdev);
3359 static int md_release(struct inode *inode, struct file * file)
3361 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3370 static int md_media_changed(struct gendisk *disk)
3372 mddev_t *mddev = disk->private_data;
3374 return mddev->changed;
3377 static int md_revalidate(struct gendisk *disk)
3379 mddev_t *mddev = disk->private_data;
3384 static struct block_device_operations md_fops =
3386 .owner = THIS_MODULE,
3388 .release = md_release,
3390 .media_changed = md_media_changed,
3391 .revalidate_disk= md_revalidate,
3394 static int md_thread(void * arg)
3396 mdk_thread_t *thread = arg;
3399 * md_thread is a 'system-thread'; its priority should be very
3400 * high. We avoid resource deadlocks individually in each
3401 * raid personality. (RAID5 does preallocation) We also use RR and
3402 * the very same RT priority as kswapd, thus we will never get
3403 * into a priority inversion deadlock.
3405 * we definitely have to have equal or higher priority than
3406 * bdflush, otherwise bdflush will deadlock if there are too
3407 * many dirty RAID5 blocks.
3410 allow_signal(SIGKILL);
3411 complete(thread->event);
3412 while (!kthread_should_stop()) {
3413 void (*run)(mddev_t *);
3415 wait_event_interruptible_timeout(thread->wqueue,
3416 test_bit(THREAD_WAKEUP, &thread->flags)
3417 || kthread_should_stop(),
3421 clear_bit(THREAD_WAKEUP, &thread->flags);
3431 void md_wakeup_thread(mdk_thread_t *thread)
3434 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
3435 set_bit(THREAD_WAKEUP, &thread->flags);
3436 wake_up(&thread->wqueue);
3440 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
3443 mdk_thread_t *thread;
3444 struct completion event;
3446 thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL);
3450 memset(thread, 0, sizeof(mdk_thread_t));
3451 init_waitqueue_head(&thread->wqueue);
3453 init_completion(&event);
3454 thread->event = &event;
3456 thread->mddev = mddev;
3457 thread->name = name;
3458 thread->timeout = MAX_SCHEDULE_TIMEOUT;
3459 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
3460 if (IS_ERR(thread->tsk)) {
3464 wait_for_completion(&event);
3468 void md_unregister_thread(mdk_thread_t *thread)
3470 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
3472 kthread_stop(thread->tsk);
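/*
 * Illustrative sketch (an assumption, modelled on how the raid
 * personalities use this pair): the owner keeps the returned handle and
 * tears it down symmetrically.  The name is a printk-style format whose
 * %s becomes the array name, e.g.
 *
 *	mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
 *	if (!mddev->thread)
 *		goto out_free_conf;
 *	...
 *	md_unregister_thread(mddev->thread);
 *	mddev->thread = NULL;
 */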
3476 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
3483 if (!rdev || test_bit(Faulty, &rdev->flags))
3486 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
3488 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
3489 __builtin_return_address(0),__builtin_return_address(1),
3490 __builtin_return_address(2),__builtin_return_address(3));
3492 if (!mddev->pers->error_handler)
3494 mddev->pers->error_handler(mddev,rdev);
3495 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3496 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3497 md_wakeup_thread(mddev->thread);
3500 /* seq_file implementation for /proc/mdstat */
3502 static void status_unused(struct seq_file *seq)
3506 struct list_head *tmp;
3508 seq_printf(seq, "unused devices: ");
3510 ITERATE_RDEV_PENDING(rdev,tmp) {
3511 char b[BDEVNAME_SIZE];
3513 seq_printf(seq, "%s ",
3514 bdevname(rdev->bdev,b));
3517 seq_printf(seq, "<none>");
3519 seq_printf(seq, "\n");
3523 static void status_resync(struct seq_file *seq, mddev_t * mddev)
3525 unsigned long max_blocks, resync, res, dt, db, rt;
3527 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
3529 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3530 max_blocks = mddev->resync_max_sectors >> 1;
3532 max_blocks = mddev->size;
3535 * Should not happen.
3541 res = (resync/1024)*1000/(max_blocks/1024 + 1);
3543 int i, x = res/50, y = 20-x;
3544 seq_printf(seq, "[");
3545 for (i = 0; i < x; i++)
3546 seq_printf(seq, "=");
3547 seq_printf(seq, ">");
3548 for (i = 0; i < y; i++)
3549 seq_printf(seq, ".");
3550 seq_printf(seq, "] ");
3552 seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
3553 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
3554 "resync" : "recovery"),
3555 res/10, res % 10, resync, max_blocks);
3558 * We do not want to overflow, so the order of operands and
3559 * the * 100 / 100 trick are important. We do a +1 to be
3560 * safe against division by zero. We only estimate anyway.
3562 * dt: time from mark until now
3563 * db: blocks written from mark until now
3564 * rt: remaining time
3566 dt = ((jiffies - mddev->resync_mark) / HZ);
3568 db = resync - (mddev->resync_mark_cnt/2);
3569 rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
3571 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
3573 seq_printf(seq, " speed=%ldK/sec", db/dt);
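/*
 * Worked example of the estimate above (illustrative numbers only):
 * with dt = 30 seconds, db = 3000 blocks written since the mark and
 * 60000 blocks remaining,
 *
 *	rt = (30 * (60000 / (3000/100 + 1))) / 100
 *	   = (30 * (60000 / 31)) / 100 = 580 seconds,
 *
 * which prints as finish=9.6min.
 */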
3576 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
3578 struct list_head *tmp;
3588 spin_lock(&all_mddevs_lock);
3589 list_for_each(tmp,&all_mddevs)
3591 mddev = list_entry(tmp, mddev_t, all_mddevs);
3593 spin_unlock(&all_mddevs_lock);
3596 spin_unlock(&all_mddevs_lock);
3598 return (void*)2;/* tail */
3602 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3604 struct list_head *tmp;
3605 mddev_t *next_mddev, *mddev = v;
3611 spin_lock(&all_mddevs_lock);
3613 tmp = all_mddevs.next;
3615 tmp = mddev->all_mddevs.next;
3616 if (tmp != &all_mddevs)
3617 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
3619 next_mddev = (void*)2;
3622 spin_unlock(&all_mddevs_lock);
3630 static void md_seq_stop(struct seq_file *seq, void *v)
3634 if (mddev && v != (void*)1 && v != (void*)2)
3638 static int md_seq_show(struct seq_file *seq, void *v)
3642 struct list_head *tmp2;
3645 struct bitmap *bitmap;
3647 if (v == (void*)1) {
3648 seq_printf(seq, "Personalities : ");
3649 spin_lock(&pers_lock);
3650 for (i = 0; i < MAX_PERSONALITY; i++)
3652 seq_printf(seq, "[%s] ", pers[i]->name);
3654 spin_unlock(&pers_lock);
3655 seq_printf(seq, "\n");
3658 if (v == (void*)2) {
3663 if (mddev_lock(mddev)!=0)
3665 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
3666 seq_printf(seq, "%s : %sactive", mdname(mddev),
3667 mddev->pers ? "" : "in");
3670 seq_printf(seq, " (read-only)");
3672 seq_printf(seq, "(auto-read-only)");
3673 seq_printf(seq, " %s", mddev->pers->name);
3677 ITERATE_RDEV(mddev,rdev,tmp2) {
3678 char b[BDEVNAME_SIZE];
3679 seq_printf(seq, " %s[%d]",
3680 bdevname(rdev->bdev,b), rdev->desc_nr);
3681 if (test_bit(WriteMostly, &rdev->flags))
3682 seq_printf(seq, "(W)");
3683 if (test_bit(Faulty, &rdev->flags)) {
3684 seq_printf(seq, "(F)");
3686 } else if (rdev->raid_disk < 0)
3687 seq_printf(seq, "(S)"); /* spare */
3691 if (!list_empty(&mddev->disks)) {
3693 seq_printf(seq, "\n %llu blocks",
3694 (unsigned long long)mddev->array_size);
3696 seq_printf(seq, "\n %llu blocks",
3697 (unsigned long long)size);
3699 if (mddev->persistent) {
3700 if (mddev->major_version != 0 ||
3701 mddev->minor_version != 90) {
3702 seq_printf(seq," super %d.%d",
3703 mddev->major_version,
3704 mddev->minor_version);
3707 seq_printf(seq, " super non-persistent");
3710 mddev->pers->status (seq, mddev);
3711 seq_printf(seq, "\n ");
3712 if (mddev->curr_resync > 2) {
3713 status_resync (seq, mddev);
3714 seq_printf(seq, "\n ");
3715 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
3716 seq_printf(seq, "\tresync=DELAYED\n ");
3717 else if (mddev->recovery_cp < MaxSector)
3718 seq_printf(seq, "\tresync=PENDING\n ");
3720 seq_printf(seq, "\n ");
3722 if ((bitmap = mddev->bitmap)) {
3723 unsigned long chunk_kb;
3724 unsigned long flags;
3725 spin_lock_irqsave(&bitmap->lock, flags);
3726 chunk_kb = bitmap->chunksize >> 10;
3727 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
3729 bitmap->pages - bitmap->missing_pages,
3731 (bitmap->pages - bitmap->missing_pages)
3732 << (PAGE_SHIFT - 10),
3733 chunk_kb ? chunk_kb : bitmap->chunksize,
3734 chunk_kb ? "KB" : "B");
3736 seq_printf(seq, ", file: ");
3737 seq_path(seq, bitmap->file->f_vfsmnt,
3738 bitmap->file->f_dentry," \t\n");
3741 seq_printf(seq, "\n");
3742 spin_unlock_irqrestore(&bitmap->lock, flags);
3745 seq_printf(seq, "\n");
3747 mddev_unlock(mddev);
3752 static struct seq_operations md_seq_ops = {
3753 .start = md_seq_start,
3754 .next = md_seq_next,
3755 .stop = md_seq_stop,
3756 .show = md_seq_show,
3759 static int md_seq_open(struct inode *inode, struct file *file)
3763 error = seq_open(file, &md_seq_ops);
3767 static struct file_operations md_seq_fops = {
3768 .open = md_seq_open,
3770 .llseek = seq_lseek,
3771 .release = seq_release,
3774 int register_md_personality(int pnum, mdk_personality_t *p)
3776 if (pnum >= MAX_PERSONALITY) {
3778 "md: tried to install personality %s as nr %d, but max is %lu\n",
3779 p->name, pnum, MAX_PERSONALITY-1);
3783 spin_lock(&pers_lock);
3785 spin_unlock(&pers_lock);
3790 printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
3791 spin_unlock(&pers_lock);
3795 int unregister_md_personality(int pnum)
3797 if (pnum >= MAX_PERSONALITY)
3800 printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
3801 spin_lock(&pers_lock);
3803 spin_unlock(&pers_lock);
3807 static int is_mddev_idle(mddev_t *mddev)
3810 struct list_head *tmp;
3812 unsigned long curr_events;
3815 ITERATE_RDEV(mddev,rdev,tmp) {
3816 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
3817 curr_events = disk_stat_read(disk, sectors[0]) +
3818 disk_stat_read(disk, sectors[1]) -
3819 atomic_read(&disk->sync_io);
3820 /* Allow some slack between the value of curr_events and last_events,
3821 * as there are some uninteresting races.
3822 * Note: the following is an unsigned comparison.
3824 if ((curr_events - rdev->last_events + 32) > 64) {
3825 rdev->last_events = curr_events;
3832 void md_done_sync(mddev_t *mddev, int blocks, int ok)
3834 /* another "blocks" (512byte) blocks have been synced */
3835 atomic_sub(blocks, &mddev->recovery_active);
3836 wake_up(&mddev->recovery_wait);
3838 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
3839 md_wakeup_thread(mddev->thread);
3840 /* stop recovery, signal do_sync ... */
3845 /* md_write_start(mddev, bi)
3846 * If we need to update some array metadata (e.g. 'active' flag
3847 * in superblock) before writing, schedule a superblock update
3848 * and wait for it to complete.
3850 void md_write_start(mddev_t *mddev, struct bio *bi)
3852 if (bio_data_dir(bi) != WRITE)
3855 BUG_ON(mddev->ro == 1);
3856 if (mddev->ro == 2) {
3857 /* need to switch to read/write */
3859 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3860 md_wakeup_thread(mddev->thread);
3862 atomic_inc(&mddev->writes_pending);
3863 if (mddev->in_sync) {
3864 spin_lock_irq(&mddev->write_lock);
3865 if (mddev->in_sync) {
3867 mddev->sb_dirty = 1;
3868 md_wakeup_thread(mddev->thread);
3870 spin_unlock_irq(&mddev->write_lock);
3872 wait_event(mddev->sb_wait, mddev->sb_dirty==0);
3875 void md_write_end(mddev_t *mddev)
3877 if (atomic_dec_and_test(&mddev->writes_pending)) {
3878 if (mddev->safemode == 2)
3879 md_wakeup_thread(mddev->thread);
3881 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
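/*
 * Illustrative sketch (an assumption, modelled on the raid1
 * personality; my_make_request is a hypothetical name): every array
 * write is bracketed by this pair so that writes_pending tracks
 * in-flight I/O and safe-mode can engage once it drains.
 *
 *	static int my_make_request(request_queue_t *q, struct bio *bio)
 *	{
 *		mddev_t *mddev = q->queuedata;
 *
 *		md_write_start(mddev, bio);
 *		... clone and submit bios to the member devices, with an
 *		    end_io handler that calls md_write_end(mddev) once
 *		    the last write completes ...
 *		return 0;
 *	}
 */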
3885 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
3887 #define SYNC_MARKS 10
3888 #define SYNC_MARK_STEP (3*HZ)
3889 static void md_do_sync(mddev_t *mddev)
3892 unsigned int currspeed = 0,
3894 sector_t max_sectors,j, io_sectors;
3895 unsigned long mark[SYNC_MARKS];
3896 sector_t mark_cnt[SYNC_MARKS];
3898 struct list_head *tmp;
3899 sector_t last_check;
3902 /* just in case the thread restarts... */
3903 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
3906 /* we overload curr_resync somewhat here.
3907 * 0 == not engaged in resync at all
3908 * 2 == checking that there is no conflict with another sync
3909 * 1 == like 2, but have yielded to allow conflicting resync to
3911 * other == active in resync - this many blocks
3913 * Before starting a resync we must have set curr_resync to
3914 * 2, and then checked that every "conflicting" array has curr_resync
3915 * less than ours. When we find one that is the same or higher
3916 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
3917 * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
3918 * This will mean we have to start checking from the beginning again.
3923 mddev->curr_resync = 2;
3926 if (signal_pending(current) ||
3927 kthread_should_stop()) {
3928 flush_signals(current);
3929 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3932 ITERATE_MDDEV(mddev2,tmp) {
3933 if (mddev2 == mddev)
3935 if (mddev2->curr_resync &&
3936 match_mddev_units(mddev,mddev2)) {
3938 if (mddev < mddev2 && mddev->curr_resync == 2) {
3939 /* arbitrarily yield */
3940 mddev->curr_resync = 1;
3941 wake_up(&resync_wait);
3943 if (mddev > mddev2 && mddev->curr_resync == 1)
3944 /* no need to wait here, we can wait the next
3945 * time 'round when curr_resync == 2
3948 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
3949 if (!signal_pending(current) &&
3950 !kthread_should_stop() &&
3951 mddev2->curr_resync >= mddev->curr_resync) {
3952 printk(KERN_INFO "md: delaying resync of %s"
3953 " until %s has finished resync (they"
3954 " share one or more physical units)\n",
3955 mdname(mddev), mdname(mddev2));
3958 finish_wait(&resync_wait, &wq);
3961 finish_wait(&resync_wait, &wq);
3964 } while (mddev->curr_resync < 2);
3966 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3967 /* resync follows the size requested by the personality,
3968 * which defaults to physical size, but can be virtual size
3970 max_sectors = mddev->resync_max_sectors;
3971 mddev->resync_mismatches = 0;
3973 /* recovery follows the physical size of devices */
3974 max_sectors = mddev->size << 1;
3976 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
3977 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
3978 " %d KB/sec/disc.\n", sysctl_speed_limit_min);
3979 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
3980 "(but not more than %d KB/sec) for reconstruction.\n",
3981 sysctl_speed_limit_max);
3983 is_mddev_idle(mddev); /* this also initializes IO event counters */
3984 /* we don't use the checkpoint if there's a bitmap */
3985 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap
3986 && ! test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3987 j = mddev->recovery_cp;
3991 for (m = 0; m < SYNC_MARKS; m++) {
3993 mark_cnt[m] = io_sectors;
3996 mddev->resync_mark = mark[last_mark];
3997 mddev->resync_mark_cnt = mark_cnt[last_mark];
4000 * Tune reconstruction:
4002 window = 32*(PAGE_SIZE/512);
4003 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
4004 window/2,(unsigned long long) max_sectors/2);
4006 atomic_set(&mddev->recovery_active, 0);
4007 init_waitqueue_head(&mddev->recovery_wait);
4012 "md: resuming recovery of %s from checkpoint.\n",
4014 mddev->curr_resync = j;
4017 while (j < max_sectors) {
4021 sectors = mddev->pers->sync_request(mddev, j, &skipped,
4022 currspeed < sysctl_speed_limit_min);
4024 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4028 if (!skipped) { /* actual IO requested */
4029 io_sectors += sectors;
4030 atomic_add(sectors, &mddev->recovery_active);
4034 if (j>1) mddev->curr_resync = j;
4037 if (last_check + window > io_sectors || j == max_sectors)
4040 last_check = io_sectors;
4042 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
4043 test_bit(MD_RECOVERY_ERR, &mddev->recovery))
4047 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
4049 int next = (last_mark+1) % SYNC_MARKS;
4051 mddev->resync_mark = mark[next];
4052 mddev->resync_mark_cnt = mark_cnt[next];
4053 mark[next] = jiffies;
4054 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
4059 if (signal_pending(current) || kthread_should_stop()) {
4061 * got a signal, exit.
4064 "md: md_do_sync() got signal ... exiting\n");
4065 flush_signals(current);
4066 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4071 * this loop exits only when we are slower than
4072 * the 'hard' speed limit, or the system was IO-idle for
4074 * the system might be non-idle CPU-wise, but we only care
4075 * about not overloading the IO subsystem. (things like an
4076 * e2fsck being done on the RAID array should execute fast)
4078 mddev->queue->unplug_fn(mddev->queue);
4081 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
4082 /((jiffies-mddev->resync_mark)/HZ +1) +1;
4084 if (currspeed > sysctl_speed_limit_min) {
4085 if ((currspeed > sysctl_speed_limit_max) ||
4086 !is_mddev_idle(mddev)) {
4087 msleep_interruptible(250);
4092 printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
4094 * this also signals 'finished resyncing' to md_stop
4097 mddev->queue->unplug_fn(mddev->queue);
4099 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
4101 /* tell personality that we are finished */
4102 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
4104 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
4105 mddev->curr_resync > 2 &&
4106 mddev->curr_resync >= mddev->recovery_cp) {
4107 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4109 "md: checkpointing recovery of %s.\n",
4111 mddev->recovery_cp = mddev->curr_resync;
4113 mddev->recovery_cp = MaxSector;
4117 mddev->curr_resync = 0;
4118 wake_up(&resync_wait);
4119 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
4120 md_wakeup_thread(mddev->thread);
4125 * This routine is regularly called by all per-raid-array threads to
4126 * deal with generic issues like resync and super-block update.
4127 * Raid personalities that don't have a thread (linear/raid0) do not
4128 * need this as they never do any recovery or update the superblock.
4130 * It does not do any resync itself, but rather "forks" off other threads
4131 * to do that as needed.
4132 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
4133 * "->recovery" and create a thread at ->sync_thread.
4134 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
4135 * and wakes up this thread, which will reap the sync thread and finish up.
4136 * This thread also removes any faulty devices (with nr_pending == 0).
4138 * The overall approach is:
4139 * 1/ if the superblock needs updating, update it.
4140 * 2/ If a recovery thread is running, don't do anything else.
4141 * 3/ If recovery has finished, clean up, possibly marking spares active.
4142 * 4/ If there are any faulty devices, remove them.
4143 * 5/ If the array is degraded, try to add spare devices
4144 * 6/ If the array has spares or is not in-sync, start a resync thread.
4146 void md_check_recovery(mddev_t *mddev)
4149 struct list_head *rtmp;
4153 bitmap_daemon_work(mddev->bitmap);
4158 if (signal_pending(current)) {
4159 if (mddev->pers->sync_request) {
4160 printk(KERN_INFO "md: %s in immediate safe mode\n",
4162 mddev->safemode = 2;
4164 flush_signals(current);
4169 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
4170 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
4171 (mddev->safemode == 1) ||
4172 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
4173 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
4177 if (mddev_trylock(mddev)==0) {
4180 spin_lock_irq(&mddev->write_lock);
4181 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
4182 !mddev->in_sync && mddev->recovery_cp == MaxSector) {
4184 mddev->sb_dirty = 1;
4186 if (mddev->safemode == 1)
4187 mddev->safemode = 0;
4188 spin_unlock_irq(&mddev->write_lock);
4190 if (mddev->sb_dirty)
4191 md_update_sb(mddev);
4194 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4195 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
4196 /* resync/recovery still happening */
4197 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4200 if (mddev->sync_thread) {
4201 /* resync has finished, collect result */
4202 md_unregister_thread(mddev->sync_thread);
4203 mddev->sync_thread = NULL;
4204 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
4205 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4207 /* activate any spares */
4208 mddev->pers->spare_active(mddev);
4210 md_update_sb(mddev);
4212 /* if the array is no longer degraded, then any saved_raid_disk
4213 * information must be scrapped
4215 if (!mddev->degraded)
4216 ITERATE_RDEV(mddev,rdev,rtmp)
4217 rdev->saved_raid_disk = -1;
4219 mddev->recovery = 0;
4220 /* flag recovery needed just to double check */
4221 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4224 /* Clear some bits that don't mean anything, but
4227 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4228 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
4229 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
4230 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4232 /* no recovery is running.
4233 * remove any failed drives, then
4234 * add spares if possible.
4235 * Spares are also removed and re-added, to allow
4236 * the personality to fail the re-add.
4238 ITERATE_RDEV(mddev,rdev,rtmp)
4239 if (rdev->raid_disk >= 0 &&
4240 (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
4241 atomic_read(&rdev->nr_pending)==0) {
4242 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
4244 sprintf(nm,"rd%d", rdev->raid_disk);
4245 sysfs_remove_link(&mddev->kobj, nm);
4246 rdev->raid_disk = -1;
4250 if (mddev->degraded) {
4251 ITERATE_RDEV(mddev,rdev,rtmp)
4252 if (rdev->raid_disk < 0
4253 && !test_bit(Faulty, &rdev->flags)) {
4254 if (mddev->pers->hot_add_disk(mddev,rdev)) {
4256 sprintf(nm, "rd%d", rdev->raid_disk);
4257 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
4265 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4266 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4267 } else if (mddev->recovery_cp < MaxSector) {
4268 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4269 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4270 /* nothing to be done ... */
4273 if (mddev->pers->sync_request) {
4274 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4275 if (spares && mddev->bitmap && ! mddev->bitmap->file) {
4276 /* We are adding a device or devices to an array
4277 * which has the bitmap stored on all devices.
4278 * So make sure all bitmap pages get written
4280 bitmap_write_all(mddev->bitmap);
4282 mddev->sync_thread = md_register_thread(md_do_sync,
4285 if (!mddev->sync_thread) {
4286 printk(KERN_ERR "%s: could not start resync"
4289 /* leave the spares where they are, it shouldn't hurt */
4290 mddev->recovery = 0;
4292 md_wakeup_thread(mddev->sync_thread);
4296 mddev_unlock(mddev);
4300 static int md_notify_reboot(struct notifier_block *this,
4301 unsigned long code, void *x)
4303 struct list_head *tmp;
4306 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
4308 printk(KERN_INFO "md: stopping all md devices.\n");
4310 ITERATE_MDDEV(mddev,tmp)
4311 if (mddev_trylock(mddev)==0)
4312 do_md_stop (mddev, 1);
4314 * certain more exotic SCSI devices are known to be
4315 * volatile with respect to too-early system reboots. While the
4316 * right place to handle this issue is the given
4317 * driver, we do want to have a safe RAID driver ...
4324 static struct notifier_block md_notifier = {
4325 .notifier_call = md_notify_reboot,
4327 .priority = INT_MAX, /* before any real devices */
4330 static void md_geninit(void)
4332 struct proc_dir_entry *p;
4334 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
4336 p = create_proc_entry("mdstat", S_IRUGO, NULL);
4338 p->proc_fops = &md_seq_fops;
4341 static int __init md_init(void)
4345 printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
4346 " MD_SB_DISKS=%d\n",
4347 MD_MAJOR_VERSION, MD_MINOR_VERSION,
4348 MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
4349 printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
4352 if (register_blkdev(MAJOR_NR, "md"))
4354 if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
4355 unregister_blkdev(MAJOR_NR, "md");
4359 blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
4360 md_probe, NULL, NULL);
4361 blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
4362 md_probe, NULL, NULL);
4364 for (minor=0; minor < MAX_MD_DEVS; ++minor)
4365 devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
4366 S_IFBLK|S_IRUSR|S_IWUSR,
4369 for (minor=0; minor < MAX_MD_DEVS; ++minor)
4370 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
4371 S_IFBLK|S_IRUSR|S_IWUSR,
4375 register_reboot_notifier(&md_notifier);
4376 raid_table_header = register_sysctl_table(raid_root_table, 1);
4386 * Searches all registered partitions for autorun RAID arrays
4389 static dev_t detected_devices[128];
4392 void md_autodetect_dev(dev_t dev)
4394 if (dev_cnt >= 0 && dev_cnt < 127)
4395 detected_devices[dev_cnt++] = dev;
4399 static void autostart_arrays(int part)
4404 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
4406 for (i = 0; i < dev_cnt; i++) {
4407 dev_t dev = detected_devices[i];
4409 rdev = md_import_device(dev,0, 0);
4413 if (test_bit(Faulty, &rdev->flags)) {
4417 list_add(&rdev->same_set, &pending_raid_disks);
4421 autorun_devices(part);
4426 static __exit void md_exit(void)
4429 struct list_head *tmp;
4431 blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
4432 blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
4433 for (i=0; i < MAX_MD_DEVS; i++)
4434 devfs_remove("md/%d", i);
4435 for (i=0; i < MAX_MD_DEVS; i++)
4436 devfs_remove("md/d%d", i);
4440 unregister_blkdev(MAJOR_NR,"md");
4441 unregister_blkdev(mdp_major, "mdp");
4442 unregister_reboot_notifier(&md_notifier);
4443 unregister_sysctl_table(raid_table_header);
4444 remove_proc_entry("mdstat", NULL);
4445 ITERATE_MDDEV(mddev,tmp) {
4446 struct gendisk *disk = mddev->gendisk;
4449 export_array(mddev);
4452 mddev->gendisk = NULL;
4457 module_init(md_init)
4458 module_exit(md_exit)
4460 static int get_ro(char *buffer, struct kernel_param *kp)
4462 return sprintf(buffer, "%d", start_readonly);
4464 static int set_ro(const char *val, struct kernel_param *kp)
4467 int num = simple_strtoul(val, &e, 10);
4468 if (*val && (*e == '\0' || *e == '\n')) {
4469 start_readonly = num;
4475 module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
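/*
 * Usage note (the module name is an assumption): with md built as
 * md-mod.ko, the flag can be set at boot with md_mod.start_ro=1 or at
 * run time via
 *
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 *
 * so that newly run arrays start auto-read-only (ro == 2) until the
 * first write arrives.
 */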
4477 EXPORT_SYMBOL(register_md_personality);
4478 EXPORT_SYMBOL(unregister_md_personality);
4479 EXPORT_SYMBOL(md_error);
4480 EXPORT_SYMBOL(md_done_sync);
4481 EXPORT_SYMBOL(md_write_start);
4482 EXPORT_SYMBOL(md_write_end);
4483 EXPORT_SYMBOL(md_register_thread);
4484 EXPORT_SYMBOL(md_unregister_thread);
4485 EXPORT_SYMBOL(md_wakeup_thread);
4486 EXPORT_SYMBOL(md_print_devices);
4487 EXPORT_SYMBOL(md_check_recovery);
4488 MODULE_LICENSE("GPL");
4490 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);