/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>

#define dprintk(x...) ((void)(DEBUG && printk(x)))

static void autostart_arrays(int part);

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * irq load.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */
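/*
 * Tuning sketch (userspace, illustrative only; md0 stands in for any
 * array, values are in KB/sec, and the paths are the ones named above):
 *
 *	# cat /proc/sys/dev/raid/speed_limit_min
 *	1000
 *	# echo 50000 > /proc/sys/dev/raid/speed_limit_min
 *	# echo 100000 > /sys/block/md0/md/sync_speed_max
 */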
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;

static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.mode		= S_IRUGO|S_IXUGO,
	},
};

static ctl_table raid_root_table[] = {
	{
		.child		= raid_dir_table,
	},
};
static const struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
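/*
 * Userspace consumption sketch (illustrative, not part of this file):
 * a monitor reads /proc/mdstat once, then polls it; the wakeup for an
 * event typically surfaces as an exceptional condition on procfs, so
 * ask for POLLPRI as well as POLLIN:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *	char buf[4096];
 *
 *	read(fd, buf, sizeof(buf));	// consume the current state
 *	poll(&pfd, 1, -1);		// blocks until md_event_count moves
 *	lseek(fd, 0, SEEK_SET);		// rewind and re-read the new state
 */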
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
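/*
 * Usage sketch for the macro above (illustrative; wanted() and
 * do_something() are hypothetical helpers):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		if (wanted(mddev)) {
 *			// breaking out: the loop leaves us holding a
 *			// reference, which we must drop ourselves
 *			do_something(mddev);
 *			mddev_put(mddev);
 *			break;
 *		}
 *	}
 */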
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	int rv;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return 0;
	}
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			schedule();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);

	rv = mddev->pers->make_request(q, bio);
	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}
static void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;

	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	/* we now know that no code is executing in the personality module,
	 * except possibly the tail end of a ->bi_end_io function, but that
	 * is certain to complete before the module has a chance to get
	 * unloaded.
	 */
}

static void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}

int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
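/*
 * Illustrative pairing of the two helpers above (a sketch, not a
 * complete reconfiguration sequence):
 *
 *	mddev_suspend(mddev);	// block md_make_request(), drain active_io
 *	...swap personality or rearrange internal structures...
 *	mddev_resume(mddev);	// let the waiting requests through again
 */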
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    !mddev->hold_active) {
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* we did a probe so need to clean up.
			 * Call schedule_work inside the spinlock
			 * so that flush_scheduled_work() after
			 * mddev_find will succeed in waiting for the
			 * work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			schedule_work(&mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}
static mddev_t *mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

	spin_lock(&all_mddevs_lock);

	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);

	list_add(&new->all_mddevs, &all_mddevs);
	spin_unlock(&all_mddevs_lock);
	new->hold_active = UNTIL_IOCTL;

	/* find an unused unit number */
	static int next_minor = 512;
	int start = next_minor;

	dev = MKDEV(MD_MAJOR, next_minor);
	next_minor++;
	if (next_minor > MINORMASK)
		next_minor = 0;
	if (next_minor == start) {
		/* Oh dear, all in use. */
		spin_unlock(&all_mddevs_lock);

	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == dev) {

	new->md_minor = MINOR(dev);
	new->hold_active = UNTIL_STOP;
	list_add(&new->all_mddevs, &all_mddevs);
	spin_unlock(&all_mddevs_lock);

	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->open_mutex);
	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	atomic_set(&new->openers, 0);
	atomic_set(&new->active_io, 0);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;
static inline int mddev_lock(mddev_t *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t *mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
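/*
 * Typical caller pattern (sketch): mddev_lock() passes through the
 * mutex_lock_interruptible() result, so a signal aborts the wait.
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;
 *	...reconfigure under reconfig_mutex...
 *	mddev_unlock(mddev);	// also pokes the per-array md thread
 */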
static mdk_rdev_t *find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t *find_rdev(mddev_t *mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
/* return the offset of the superblock in 512-byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
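/*
 * Worked example: the 0.90 layout reserves the last 64K-aligned 64K of
 * the device (MD_RESERVED_SECTORS == 128).  For a 1000000-sector device
 * MD_NEW_SIZE_SECTORS(1000000) = (1000000 & ~127) - 128
 *                              = 999936 - 128 = 999808,
 * so the superblock is written at sector 999808.
 */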
static int alloc_disk_sb(mdk_rdev_t *rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for (;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}
int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		 struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
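/*
 * Usage sketch: synchronously read one page from the start of a device
 * (this is the same pattern read_disk_sb() below relies on):
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	if (page && sync_page_io(bdev, 0, PAGE_SIZE, page, READ))
 *		...page now holds the first 4K of the device...
 */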
static int read_disk_sb(mdk_rdev_t *rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}

	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
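/*
 * Worked example: folding 0x00012345 gives 0x2345 + 0x0001 = 0x2346.
 * After one fold the value is at most 0x1fffe, so the second fold is
 * enough to guarantee a 16-bit result for any 32-bit input.
 */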
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *      The first time, mddev->raid_disks will be 0, and data from
 *      dev should be merged in.  Subsequent calls check that dev
 *      is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 */

struct super_type {
	char		*name;
	struct module	*owner;
	int		(*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
				      int minor_version);
	int		(*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		(*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long (*rdev_size_change)(mdk_rdev_t *rdev,
					       sector_t num_sectors);
};
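/*
 * Dispatch sketch: callers pick a handler through the array indexed by
 * the metadata major version, e.g.
 *
 *	err = super_types[mddev->major_version].load_super(rdev, refdev,
 *							   minor_version);
 *	if (err >= 0)
 *		err = super_types[mddev->major_version].
 *			validate_super(mddev, rdev);
 */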
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_file && !mddev->bitmap_offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
	       mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512-byte sectors);
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4KB.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret)
		return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
		       sb->major_version, sb->minor_version,
		       b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
		       b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
			       b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev2 = md_event(refsb);

	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;

		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;

		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;

		if (sb->events_hi == sb->cp_events_hi &&
		    sb->events_lo == sb->cp_events_lo) {
			mddev->recovery_cp = sb->recovery_cp;
		} else
			mddev->recovery_cp = 0;

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0, failed=0, spare=0, nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;

	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;

	sb->recovery_cp = mddev->recovery_cp;
	sb->cp_events_hi = (mddev->events>>32);
	sb->cp_events_lo = (u32)mddev->events;
	if (mddev->recovery_cp == MaxSector)
		sb->state = (1<< MD_SB_CLEAN);

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
/*
 * version 1 superblock
 */
static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512-byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
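	/*
	 * Worked example for minor_version 0: a 1000000-sector device
	 * gives sb_start = (1000000 - 16) & ~7 = 999984, i.e. 8K from
	 * the end of the device, rounded down to a 4K boundary.
	 */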
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
			       " superblock to %s\n",
			       bdevname(rdev->bdev,b),
			       bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;

	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		if (rdev->recovery_offset > 0) {
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
			sb->recovery_offset =
				cpu_to_le64(rdev->recovery_offset);
		}
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	}
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}
1507 * Try to register data integrity profile for an mddev
1509 * This is called when an array is started and after a disk has been kicked
1510 * from the array. It only succeeds if all working and active component devices
1511 * are integrity capable with matching profiles.
1513 int md_integrity_register(mddev_t *mddev)
1515 mdk_rdev_t *rdev, *reference = NULL;
1517 if (list_empty(&mddev->disks))
1518 return 0; /* nothing to do */
1519 if (blk_get_integrity(mddev->gendisk))
1520 return 0; /* already registered */
1521 list_for_each_entry(rdev, &mddev->disks, same_set) {
1522 /* skip spares and non-functional disks */
1523 if (test_bit(Faulty, &rdev->flags))
1525 if (rdev->raid_disk < 0)
1528 * If at least one rdev is not integrity capable, we can not
1529 * enable data integrity for the md device.
1531 if (!bdev_get_integrity(rdev->bdev))
1534 /* Use the first rdev as the reference */
1538 /* does this rdev's profile match the reference profile? */
1539 if (blk_integrity_compare(reference->bdev->bd_disk,
1540 rdev->bdev->bd_disk) < 0)
1544 * All component devices are integrity capable and have matching
1545 * profiles, register the common profile for the md device.
1547 if (blk_integrity_register(mddev->gendisk,
1548 bdev_get_integrity(reference->bdev)) != 0) {
1549 printk(KERN_ERR "md: failed to register integrity for %s\n",
1553 printk(KERN_NOTICE "md: data integrity on %s enabled\n",
1557 EXPORT_SYMBOL(md_integrity_register);
1559 /* Disable data integrity if non-capable/non-matching disk is being added */
1560 void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
1562 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
1563 struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
1565 if (!bi_mddev) /* nothing to do */
1567 if (rdev->raid_disk < 0) /* skip spares */
1569 if (bi_rdev && blk_integrity_compare(mddev->gendisk,
1570 rdev->bdev->bd_disk) >= 0)
1572 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
1573 blk_integrity_unregister(mddev->gendisk);
1575 EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(mdk_rdev_t *rdev, mddev_t *mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
		       __bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
		       bdevname(bdev, b));
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
	       bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}
static void kick_rdev_from_array(mdk_rdev_t *rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major, desc->minor, desc->raid_disk, desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk(" D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md: THIS: ");
	print_desc(&sb->this_disk);
}

static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
		"md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
		":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
		"md: Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
			& MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
		"md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
		"md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
			":%02x%02x%02x%02x%02x%02x\n"
		"md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
		"md: (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}

static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md: **********************************\n");
}
static void sync_sbs(mddev_t *mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;

	/* First make sure individual recovery_offsets are correct */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
	}
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t *mddev, int force_change)
{
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	mddev->utime = get_seconds();
	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * something else.
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, an 'even' event must also go
			 * to spares. */
			if ((mddev->events&1)==0)
				nospares = 0;
		} else {
			/* otherwise an 'odd' event must go to spares */
			if ((mddev->events&1))
				nospares = 0;
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events--;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev), mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;
		}

		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case.  For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (!*cmd && !*str)
		return 1;
	return 0;
}
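/*
 * e.g. cmd_match("faulty\n", "faulty") and cmd_match("faulty", "faulty")
 * both return 1, while cmd_match("faulty2", "faulty") returns 0.
 */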
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};

static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len += sprintf(page+len, "%sfaulty", sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync", sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly", sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flag
	 *  -blocked - clears the Blocked flag
	 *  insync - sets Insync providing device isn't active
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		clear_bit(Blocked, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
		set_bit(In_sync, &rdev->flags);
		err = 0;
	}
	if (!err && rdev->sysfs_state)
		sysfs_notify_dirent(rdev->sysfs_state);
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
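/*
 * Userspace example (illustrative; md0 and sda1 stand in for a real
 * array and member device):
 *
 *	# echo -writemostly > /sys/block/md0/md/dev-sda1/state
 *	# cat /sys/block/md0/md/dev-sda1/state
 *	in_sync
 */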
static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2204 slot_show(mdk_rdev_t *rdev, char *page)
2206 if (rdev->raid_disk < 0)
2207 return sprintf(page, "none\n");
2209 return sprintf(page, "%d\n", rdev->raid_disk);
2213 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2218 int slot = simple_strtoul(buf, &e, 10);
2219 if (strncmp(buf, "none", 4)==0)
2221 else if (e==buf || (*e && *e!= '\n'))
2223 if (rdev->mddev->pers && slot == -1) {
2224 /* Setting 'slot' on an active array requires also
2225 * updating the 'rd%d' link, and communicating
2226 * with the personality with ->hot_*_disk.
2227 * For now we only support removing
2228 * failed/spare devices. This normally happens automatically,
2229 * but not when the metadata is externally managed.
2231 if (rdev->raid_disk == -1)
2233 /* personality does all needed checks */
2234 if (rdev->mddev->pers->hot_add_disk == NULL)
2236 err = rdev->mddev->pers->
2237 hot_remove_disk(rdev->mddev, rdev->raid_disk);
2240 sprintf(nm, "rd%d", rdev->raid_disk);
2241 sysfs_remove_link(&rdev->mddev->kobj, nm);
2242 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2243 md_wakeup_thread(rdev->mddev->thread);
2244 } else if (rdev->mddev->pers) {
2246 /* Activating a spare .. or possibly reactivating
2247 * if we ever get bitmaps working here.
2250 if (rdev->raid_disk != -1)
2253 if (rdev->mddev->pers->hot_add_disk == NULL)
2256 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
2257 if (rdev2->raid_disk == slot)
2260 rdev->raid_disk = slot;
2261 if (test_bit(In_sync, &rdev->flags))
2262 rdev->saved_raid_disk = slot;
2264 rdev->saved_raid_disk = -1;
2265 err = rdev->mddev->pers->
2266 hot_add_disk(rdev->mddev, rdev);
2268 rdev->raid_disk = -1;
2271 sysfs_notify_dirent(rdev->sysfs_state);
2272 sprintf(nm, "rd%d", rdev->raid_disk);
2273 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2275 "md: cannot register "
2277 nm, mdname(rdev->mddev));
2279 /* don't wakeup anyone, leave that to userspace. */
2281 if (slot >= rdev->mddev->raid_disks)
2283 rdev->raid_disk = slot;
2284 /* assume it is working */
2285 clear_bit(Faulty, &rdev->flags);
2286 clear_bit(WriteMostly, &rdev->flags);
2287 set_bit(In_sync, &rdev->flags);
2288 sysfs_notify_dirent(rdev->sysfs_state);
2294 static struct rdev_sysfs_entry rdev_slot =
2295 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
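/*
 * Illustrative sketch (md0/sda1 are assumed names): 'slot' reports the
 * role of a member device; writing "none" hot-removes a failed or spare
 * device from an active array, as implemented above:
 *   cat /sys/block/md0/md/dev-sda1/slot
 *   echo none > /sys/block/md0/md/dev-sda1/slot
 */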
2298 offset_show(mdk_rdev_t *rdev, char *page)
2300 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2304 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2307 unsigned long long offset = simple_strtoull(buf, &e, 10);
2308 if (e==buf || (*e && *e != '\n'))
2310 if (rdev->mddev->pers && rdev->raid_disk >= 0)
2312 if (rdev->sectors && rdev->mddev->external)
2313 /* Must set offset before size, so overlap checks
2316 rdev->data_offset = offset;
2320 static struct rdev_sysfs_entry rdev_offset =
2321 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2324 rdev_size_show(mdk_rdev_t *rdev, char *page)
2326 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2329 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2331 /* check if two start/length pairs overlap */
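/* equivalently, [s1, s1+l1) and [s2, s2+l2) intersect exactly when
 * s1 < s2 + l2 && s2 < s1 + l1
 */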
2339 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2341 unsigned long long blocks;
2344 if (strict_strtoull(buf, 10, &blocks) < 0)
2347 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2348 return -EINVAL; /* sector conversion overflow */
2351 if (new != blocks * 2)
2352 return -EINVAL; /* unsigned long long to sector_t overflow */
2359 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2361 mddev_t *my_mddev = rdev->mddev;
2362 sector_t oldsectors = rdev->sectors;
2365 if (strict_blocks_to_sectors(buf, &sectors) < 0)
2367 if (my_mddev->pers && rdev->raid_disk >= 0) {
2368 if (my_mddev->persistent) {
2369 sectors = super_types[my_mddev->major_version].
2370 rdev_size_change(rdev, sectors);
2373 } else if (!sectors)
2374 sectors = (rdev->bdev->bd_inode->i_size >> 9) -
2377 if (sectors < my_mddev->dev_sectors)
2378 return -EINVAL; /* component must fit device */
2380 rdev->sectors = sectors;
2381 if (sectors > oldsectors && my_mddev->external) {
2382 /* need to check that all other rdevs with the same ->bdev
2383 * do not overlap. We need to unlock the mddev to avoid
2384 * a deadlock. We have already changed rdev->sectors, and if
2385 * we have to change it back, we will have the lock again.
2389 struct list_head *tmp;
2391 mddev_unlock(my_mddev);
2392 for_each_mddev(mddev, tmp) {
2396 list_for_each_entry(rdev2, &mddev->disks, same_set)
2397 if (test_bit(AllReserved, &rdev2->flags) ||
2398 (rdev->bdev == rdev2->bdev &&
2400 overlaps(rdev->data_offset, rdev->sectors,
2406 mddev_unlock(mddev);
2412 mddev_lock(my_mddev);
2414 /* Someone else could have slipped in a size
2415 * change here, but doing so is just silly.
2416 * We put oldsectors back because we *know* it is
2417 * safe, and trust userspace not to race with
2420 rdev->sectors = oldsectors;
2427 static struct rdev_sysfs_entry rdev_size =
2428 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2430 static struct attribute *rdev_default_attrs[] = {
2439 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2441 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2442 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2443 mddev_t *mddev = rdev->mddev;
2449 rv = mddev ? mddev_lock(mddev) : -EBUSY;
2451 if (rdev->mddev == NULL)
2454 rv = entry->show(rdev, page);
2455 mddev_unlock(mddev);
2461 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2462 const char *page, size_t length)
2464 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2465 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2467 mddev_t *mddev = rdev->mddev;
2471 if (!capable(CAP_SYS_ADMIN))
2473 rv = mddev ? mddev_lock(mddev): -EBUSY;
2475 if (rdev->mddev == NULL)
2478 rv = entry->store(rdev, page, length);
2479 mddev_unlock(mddev);
2484 static void rdev_free(struct kobject *ko)
2486 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2489 static struct sysfs_ops rdev_sysfs_ops = {
2490 .show = rdev_attr_show,
2491 .store = rdev_attr_store,
2493 static struct kobj_type rdev_ktype = {
2494 .release = rdev_free,
2495 .sysfs_ops = &rdev_sysfs_ops,
2496 .default_attrs = rdev_default_attrs,
2500 * Import a device. If 'super_format' >= 0, then sanity check the superblock
2502 * mark the device faulty if:
2504 * - the device is nonexistent (zero size)
2505 * - the device has no valid superblock
2507 * a faulty rdev _never_ has rdev->sb set.
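 *
 * Illustrative call, mirroring the call sites below (the 0/90 arguments,
 * selecting a v0.90 superblock, are just one possibility):
 *   rdev = md_import_device(dev, 0, 90);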
2509 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2511 char b[BDEVNAME_SIZE];
2516 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2518 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2519 return ERR_PTR(-ENOMEM);
2522 if ((err = alloc_disk_sb(rdev)))
2525 err = lock_rdev(rdev, newdev, super_format == -2);
2529 kobject_init(&rdev->kobj, &rdev_ktype);
2532 rdev->saved_raid_disk = -1;
2533 rdev->raid_disk = -1;
2535 rdev->data_offset = 0;
2536 rdev->sb_events = 0;
2537 atomic_set(&rdev->nr_pending, 0);
2538 atomic_set(&rdev->read_errors, 0);
2539 atomic_set(&rdev->corrected_errors, 0);
2541 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2544 "md: %s has zero or unknown size, marking faulty!\n",
2545 bdevname(rdev->bdev,b));
2550 if (super_format >= 0) {
2551 err = super_types[super_format].
2552 load_super(rdev, NULL, super_minor);
2553 if (err == -EINVAL) {
2555 "md: %s does not have a valid v%d.%d "
2556 "superblock, not importing!\n",
2557 bdevname(rdev->bdev,b),
2558 super_format, super_minor);
2563 "md: could not read %s's sb, not importing!\n",
2564 bdevname(rdev->bdev,b));
2569 INIT_LIST_HEAD(&rdev->same_set);
2570 init_waitqueue_head(&rdev->blocked_wait);
2575 if (rdev->sb_page) {
2581 return ERR_PTR(err);
2585 * Check a full RAID array for plausibility
2589 static void analyze_sbs(mddev_t * mddev)
2592 mdk_rdev_t *rdev, *freshest, *tmp;
2593 char b[BDEVNAME_SIZE];
2596 rdev_for_each(rdev, tmp, mddev)
2597 switch (super_types[mddev->major_version].
2598 load_super(rdev, freshest, mddev->minor_version)) {
2606 "md: fatal superblock inconsistency in %s"
2607 " -- removing from array\n",
2608 bdevname(rdev->bdev,b));
2609 kick_rdev_from_array(rdev);
2613 super_types[mddev->major_version].
2614 validate_super(mddev, freshest);
2617 rdev_for_each(rdev, tmp, mddev) {
2618 if (rdev->desc_nr >= mddev->max_disks ||
2619 i > mddev->max_disks) {
2621 "md: %s: %s: only %d devices permitted\n",
2622 mdname(mddev), bdevname(rdev->bdev, b),
2624 kick_rdev_from_array(rdev);
2627 if (rdev != freshest)
2628 if (super_types[mddev->major_version].
2629 validate_super(mddev, rdev)) {
2630 printk(KERN_WARNING "md: kicking non-fresh %s"
2632 bdevname(rdev->bdev,b));
2633 kick_rdev_from_array(rdev);
2636 if (mddev->level == LEVEL_MULTIPATH) {
2637 rdev->desc_nr = i++;
2638 rdev->raid_disk = rdev->desc_nr;
2639 set_bit(In_sync, &rdev->flags);
2640 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
2641 rdev->raid_disk = -1;
2642 clear_bit(In_sync, &rdev->flags);
2647 static void md_safemode_timeout(unsigned long data);
2650 safe_delay_show(mddev_t *mddev, char *page)
2652 int msec = (mddev->safemode_delay*1000)/HZ;
2653 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2656 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2664 /* remove a period, and count digits after it */
2665 if (len >= sizeof(buf))
2667 strlcpy(buf, cbuf, sizeof(buf));
2668 for (i=0; i<len; i++) {
2670 if (isdigit(buf[i])) {
2675 } else if (buf[i] == '.') {
2680 if (strict_strtoul(buf, 10, &msec) < 0)
2682 msec = (msec * 1000) / scale;
2684 mddev->safemode_delay = 0;
2686 unsigned long old_delay = mddev->safemode_delay;
2687 mddev->safemode_delay = (msec*HZ)/1000;
2688 if (mddev->safemode_delay == 0)
2689 mddev->safemode_delay = 1;
2690 if (mddev->safemode_delay < old_delay)
2691 md_safemode_timeout((unsigned long)mddev);
2695 static struct md_sysfs_entry md_safe_delay =
2696 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
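/*
 * Illustrative sketch (md0 is an assumed name): the delay is written as
 * seconds with an optional fractional part, e.g. 200 msec:
 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay
 */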
2699 level_show(mddev_t *mddev, char *page)
2701 struct mdk_personality *p = mddev->pers;
2703 return sprintf(page, "%s\n", p->name);
2704 else if (mddev->clevel[0])
2705 return sprintf(page, "%s\n", mddev->clevel);
2706 else if (mddev->level != LEVEL_NONE)
2707 return sprintf(page, "%d\n", mddev->level);
2713 level_store(mddev_t *mddev, const char *buf, size_t len)
2717 struct mdk_personality *pers;
2721 if (mddev->pers == NULL) {
2724 if (len >= sizeof(mddev->clevel))
2726 strncpy(mddev->clevel, buf, len);
2727 if (mddev->clevel[len-1] == '\n')
2729 mddev->clevel[len] = 0;
2730 mddev->level = LEVEL_NONE;
2734 /* request to change the personality. Need to ensure:
2735 * - array is not engaged in resync/recovery/reshape
2736 * - old personality can be suspended
2737 * - the new personality can take over the array.
2740 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
2743 if (!mddev->pers->quiesce) {
2744 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
2745 mdname(mddev), mddev->pers->name);
2749 /* Now find the new personality */
2750 if (len == 0 || len >= sizeof(level))
2752 strncpy(level, buf, len);
2753 if (level[len-1] == '\n')
2757 request_module("md-%s", level);
2758 spin_lock(&pers_lock);
2759 pers = find_pers(LEVEL_NONE, level);
2760 if (!pers || !try_module_get(pers->owner)) {
2761 spin_unlock(&pers_lock);
2762 printk(KERN_WARNING "md: personality %s not loaded\n", level);
2765 spin_unlock(&pers_lock);
2767 if (pers == mddev->pers) {
2768 /* Nothing to do! */
2769 module_put(pers->owner);
2772 if (!pers->takeover) {
2773 module_put(pers->owner);
2774 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
2775 mdname(mddev), level);
2779 /* ->takeover must set new_* and/or delta_disks
2780 * if it succeeds, and may set them when it fails.
2782 priv = pers->takeover(mddev);
2784 mddev->new_level = mddev->level;
2785 mddev->new_layout = mddev->layout;
2786 mddev->new_chunk_sectors = mddev->chunk_sectors;
2787 mddev->raid_disks -= mddev->delta_disks;
2788 mddev->delta_disks = 0;
2789 module_put(pers->owner);
2790 printk(KERN_WARNING "md: %s: %s would not accept array\n",
2791 mdname(mddev), level);
2792 return PTR_ERR(priv);
2795 /* Looks like we have a winner */
2796 mddev_suspend(mddev);
2797 mddev->pers->stop(mddev);
2798 module_put(mddev->pers->owner);
2799 /* Invalidate devices that are now superfluous */
2800 list_for_each_entry(rdev, &mddev->disks, same_set)
2801 if (rdev->raid_disk >= mddev->raid_disks) {
2802 rdev->raid_disk = -1;
2803 clear_bit(In_sync, &rdev->flags);
2806 mddev->private = priv;
2807 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2808 mddev->level = mddev->new_level;
2809 mddev->layout = mddev->new_layout;
2810 mddev->chunk_sectors = mddev->new_chunk_sectors;
2811 mddev->delta_disks = 0;
2813 mddev_resume(mddev);
2814 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2815 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2816 md_wakeup_thread(mddev->thread);
2820 static struct md_sysfs_entry md_level =
2821 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
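/*
 * Illustrative sketch (md0 is an assumed name): on an inactive array this
 * merely records the requested level; on an active array it goes through
 * the takeover path above:
 *   echo raid5 > /sys/block/md0/md/level
 */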
2825 layout_show(mddev_t *mddev, char *page)
2827 /* just a number, not meaningful for all levels */
2828 if (mddev->reshape_position != MaxSector &&
2829 mddev->layout != mddev->new_layout)
2830 return sprintf(page, "%d (%d)\n",
2831 mddev->new_layout, mddev->layout);
2832 return sprintf(page, "%d\n", mddev->layout);
2836 layout_store(mddev_t *mddev, const char *buf, size_t len)
2839 unsigned long n = simple_strtoul(buf, &e, 10);
2841 if (!*buf || (*e && *e != '\n'))
2846 if (mddev->pers->check_reshape == NULL)
2848 mddev->new_layout = n;
2849 err = mddev->pers->check_reshape(mddev);
2851 mddev->new_layout = mddev->layout;
2855 mddev->new_layout = n;
2856 if (mddev->reshape_position == MaxSector)
2861 static struct md_sysfs_entry md_layout =
2862 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2866 raid_disks_show(mddev_t *mddev, char *page)
2868 if (mddev->raid_disks == 0)
2870 if (mddev->reshape_position != MaxSector &&
2871 mddev->delta_disks != 0)
2872 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2873 mddev->raid_disks - mddev->delta_disks);
2874 return sprintf(page, "%d\n", mddev->raid_disks);
2877 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2880 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2884 unsigned long n = simple_strtoul(buf, &e, 10);
2886 if (!*buf || (*e && *e != '\n'))
2890 rv = update_raid_disks(mddev, n);
2891 else if (mddev->reshape_position != MaxSector) {
2892 int olddisks = mddev->raid_disks - mddev->delta_disks;
2893 mddev->delta_disks = n - olddisks;
2894 mddev->raid_disks = n;
2896 mddev->raid_disks = n;
2897 return rv ? rv : len;
2899 static struct md_sysfs_entry md_raid_disks =
2900 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
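/*
 * Illustrative sketch (md0 is an assumed name): growing an active array to
 * four devices; the personality decides whether a reshape is required:
 *   echo 4 > /sys/block/md0/md/raid_disks
 */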
2903 chunk_size_show(mddev_t *mddev, char *page)
2905 if (mddev->reshape_position != MaxSector &&
2906 mddev->chunk_sectors != mddev->new_chunk_sectors)
2907 return sprintf(page, "%d (%d)\n",
2908 mddev->new_chunk_sectors << 9,
2909 mddev->chunk_sectors << 9);
2910 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
2914 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2917 unsigned long n = simple_strtoul(buf, &e, 10);
2919 if (!*buf || (*e && *e != '\n'))
2924 if (mddev->pers->check_reshape == NULL)
2926 mddev->new_chunk_sectors = n >> 9;
2927 err = mddev->pers->check_reshape(mddev);
2929 mddev->new_chunk_sectors = mddev->chunk_sectors;
2933 mddev->new_chunk_sectors = n >> 9;
2934 if (mddev->reshape_position == MaxSector)
2935 mddev->chunk_sectors = n >> 9;
2939 static struct md_sysfs_entry md_chunk_size =
2940 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
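/*
 * Illustrative sketch (md0 is an assumed name): the value is in bytes and
 * converted to sectors with ">> 9" above, e.g. a 512 KiB chunk:
 *   echo 524288 > /sys/block/md0/md/chunk_size
 */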
2943 resync_start_show(mddev_t *mddev, char *page)
2945 if (mddev->recovery_cp == MaxSector)
2946 return sprintf(page, "none\n");
2947 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2951 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2954 unsigned long long n = simple_strtoull(buf, &e, 10);
2958 if (!*buf || (*e && *e != '\n'))
2961 mddev->recovery_cp = n;
2964 static struct md_sysfs_entry md_resync_start =
2965 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2968 * The array state can be:
2969 *
2970 * clear
2971 * No devices, no size, no level
2972 * Equivalent to STOP_ARRAY ioctl
2973 * inactive
2974 * May have some settings, but array is not active
2975 * all IO results in error
2976 * When written, doesn't tear down array, but just stops it
2977 * suspended (not supported yet)
2978 * All IO requests will block. The array can be reconfigured.
2979 * Writing this, if accepted, will block until array is quiescent
2980 * readonly
2981 * no resync can happen. no superblocks get written.
2982 * write requests fail
2983 * read-auto
2984 * like readonly, but behaves like 'clean' on a write request.
2985 *
2986 * clean - no pending writes, but otherwise active.
2987 * When written to inactive array, starts without resync
2988 * If a write request arrives then
2989 * if metadata is known, mark 'dirty' and switch to 'active'.
2990 * if not known, block and switch to write-pending
2991 * If written to an active array that has pending writes, then fails.
2992 * active
2993 * fully active: IO and resync can be happening.
2994 * When written to inactive array, starts with resync
2995 *
2996 * write-pending
2997 * clean, but writes are blocked waiting for 'active' to be written.
2998 *
2999 * active-idle
3000 * like active, but no writes have been seen for a while (safe_mode_delay, 200 msec by default).
3003 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3004 write_pending, active_idle, bad_word};
3005 static char *array_states[] = {
3006 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3007 "write-pending", "active-idle", NULL };
3009 static int match_word(const char *word, char **list)
3012 for (n=0; list[n]; n++)
3013 if (cmd_match(word, list[n]))
3019 array_state_show(mddev_t *mddev, char *page)
3021 enum array_state st = inactive;
3034 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
3036 else if (mddev->safemode)
3042 if (list_empty(&mddev->disks) &&
3043 mddev->raid_disks == 0 &&
3044 mddev->dev_sectors == 0)
3049 return sprintf(page, "%s\n", array_states[st]);
3052 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
3053 static int do_md_run(mddev_t * mddev);
3054 static int restart_array(mddev_t *mddev);
3057 array_state_store(mddev_t *mddev, const char *buf, size_t len)
3060 enum array_state st = match_word(buf, array_states);
3065 /* stopping an active array */
3066 if (atomic_read(&mddev->openers) > 0)
3068 err = do_md_stop(mddev, 0, 0);
3071 /* stopping an active array */
3073 if (atomic_read(&mddev->openers) > 0)
3075 err = do_md_stop(mddev, 2, 0);
3077 err = 0; /* already inactive */
3080 break; /* not supported yet */
3083 err = do_md_stop(mddev, 1, 0);
3086 set_disk_ro(mddev->gendisk, 1);
3087 err = do_md_run(mddev);
3093 err = do_md_stop(mddev, 1, 0);
3094 else if (mddev->ro == 1)
3095 err = restart_array(mddev);
3098 set_disk_ro(mddev->gendisk, 0);
3102 err = do_md_run(mddev);
3107 restart_array(mddev);
3108 spin_lock_irq(&mddev->write_lock);
3109 if (atomic_read(&mddev->writes_pending) == 0) {
3110 if (mddev->in_sync == 0) {
3112 if (mddev->safemode == 1)
3113 mddev->safemode = 0;
3114 if (mddev->persistent)
3115 set_bit(MD_CHANGE_CLEAN,
3121 spin_unlock_irq(&mddev->write_lock);
3127 restart_array(mddev);
3128 if (mddev->external)
3129 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3130 wake_up(&mddev->sb_wait);
3134 set_disk_ro(mddev->gendisk, 0);
3135 err = do_md_run(mddev);
3140 /* these cannot be set */
3146 sysfs_notify_dirent(mddev->sysfs_state);
3150 static struct md_sysfs_entry md_array_state =
3151 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
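/*
 * Illustrative sketch (md0 is an assumed name):
 *   cat /sys/block/md0/md/array_state            # e.g. "clean"
 *   echo readonly > /sys/block/md0/md/array_state
 */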
3154 null_show(mddev_t *mddev, char *page)
3160 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
3162 /* buf must be "%d:%d", optionally followed by '\n', giving major and minor numbers */
3163 /* The new device is added to the array.
3164 * If the array has a persistent superblock, we read the
3165 * superblock to initialise info and check validity.
3166 * Otherwise, only checking done is that in bind_rdev_to_array,
3167 * which mainly checks size.
3170 int major = simple_strtoul(buf, &e, 10);
3176 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3178 minor = simple_strtoul(e+1, &e, 10);
3179 if (*e && *e != '\n')
3181 dev = MKDEV(major, minor);
3182 if (major != MAJOR(dev) ||
3183 minor != MINOR(dev))
3187 if (mddev->persistent) {
3188 rdev = md_import_device(dev, mddev->major_version,
3189 mddev->minor_version);
3190 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3191 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3192 mdk_rdev_t, same_set);
3193 err = super_types[mddev->major_version]
3194 .load_super(rdev, rdev0, mddev->minor_version);
3198 } else if (mddev->external)
3199 rdev = md_import_device(dev, -2, -1);
3201 rdev = md_import_device(dev, -1, -1);
3204 return PTR_ERR(rdev);
3205 err = bind_rdev_to_array(rdev, mddev);
3209 return err ? err : len;
3212 static struct md_sysfs_entry md_new_device =
3213 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
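/*
 * Illustrative sketch (the 8:16 device numbers are an assumption): binding
 * a block device into the array by major:minor:
 *   echo 8:16 > /sys/block/md0/md/new_dev
 */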
3216 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3219 unsigned long chunk, end_chunk;
3223 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3225 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3226 if (buf == end) break;
3227 if (*end == '-') { /* range */
3229 end_chunk = simple_strtoul(buf, &end, 0);
3230 if (buf == end) break;
3232 if (*end && !isspace(*end)) break;
3233 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3235 while (isspace(*buf)) buf++;
3237 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3242 static struct md_sysfs_entry md_bitmap =
3243 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
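/*
 * Illustrative sketch (md0 is an assumed name; the array must have a
 * bitmap): dirtying bitmap chunks 0-100 so they will be resynced:
 *   echo 0-100 > /sys/block/md0/md/bitmap_set_bits
 */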
3246 size_show(mddev_t *mddev, char *page)
3248 return sprintf(page, "%llu\n",
3249 (unsigned long long)mddev->dev_sectors / 2);
3252 static int update_size(mddev_t *mddev, sector_t num_sectors);
3255 size_store(mddev_t *mddev, const char *buf, size_t len)
3257 /* If array is inactive, we can reduce the component size, but
3258 * not increase it (except from 0).
3259 * If array is active, we can try an on-line resize
3262 int err = strict_blocks_to_sectors(buf, &sectors);
3267 err = update_size(mddev, sectors);
3268 md_update_sb(mddev, 1);
3270 if (mddev->dev_sectors == 0 ||
3271 mddev->dev_sectors > sectors)
3272 mddev->dev_sectors = sectors;
3276 return err ? err : len;
3279 static struct md_sysfs_entry md_size =
3280 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
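/*
 * Illustrative sketch (md0 is an assumed name; the value is in 1K blocks):
 * setting each member's used size to 1 GiB:
 *   echo 1048576 > /sys/block/md0/md/component_size
 */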
3285 * 'none' for arrays with no metadata (good luck...)
3286 * 'external' for arrays with externally managed metadata,
3287 * or N.M for internally known formats
3290 metadata_show(mddev_t *mddev, char *page)
3292 if (mddev->persistent)
3293 return sprintf(page, "%d.%d\n",
3294 mddev->major_version, mddev->minor_version);
3295 else if (mddev->external)
3296 return sprintf(page, "external:%s\n", mddev->metadata_type);
3298 return sprintf(page, "none\n");
3302 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3306 /* Changing the details of 'external' metadata is
3307 * always permitted. Otherwise there must be
3308 * no devices attached to the array.
3310 if (mddev->external && strncmp(buf, "external:", 9) == 0)
3312 else if (!list_empty(&mddev->disks))
3315 if (cmd_match(buf, "none")) {
3316 mddev->persistent = 0;
3317 mddev->external = 0;
3318 mddev->major_version = 0;
3319 mddev->minor_version = 90;
3322 if (strncmp(buf, "external:", 9) == 0) {
3323 size_t namelen = len-9;
3324 if (namelen >= sizeof(mddev->metadata_type))
3325 namelen = sizeof(mddev->metadata_type)-1;
3326 strncpy(mddev->metadata_type, buf+9, namelen);
3327 mddev->metadata_type[namelen] = 0;
3328 if (namelen && mddev->metadata_type[namelen-1] == '\n')
3329 mddev->metadata_type[--namelen] = 0;
3330 mddev->persistent = 0;
3331 mddev->external = 1;
3332 mddev->major_version = 0;
3333 mddev->minor_version = 90;
3336 major = simple_strtoul(buf, &e, 10);
3337 if (e==buf || *e != '.')
3340 minor = simple_strtoul(buf, &e, 10);
3341 if (e==buf || (*e && *e != '\n') )
3343 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
3345 mddev->major_version = major;
3346 mddev->minor_version = minor;
3347 mddev->persistent = 1;
3348 mddev->external = 0;
3352 static struct md_sysfs_entry md_metadata =
3353 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
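/*
 * Illustrative sketches (names assumed); only accepted while no devices
 * are attached, except for re-describing 'external' metadata:
 *   echo 1.2 > /sys/block/md0/md/metadata_version
 *   echo external:somename > /sys/block/md0/md/metadata_version
 */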
3356 action_show(mddev_t *mddev, char *page)
3358 char *type = "idle";
3359 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3361 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3362 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3363 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3365 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3366 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3368 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3372 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3375 return sprintf(page, "%s\n", type);
3379 action_store(mddev_t *mddev, const char *page, size_t len)
3381 if (!mddev->pers || !mddev->pers->sync_request)
3384 if (cmd_match(page, "frozen"))
3385 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3387 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3389 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3390 if (mddev->sync_thread) {
3391 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3392 md_unregister_thread(mddev->sync_thread);
3393 mddev->sync_thread = NULL;
3394 mddev->recovery = 0;
3396 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3397 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3399 else if (cmd_match(page, "resync"))
3400 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3401 else if (cmd_match(page, "recover")) {
3402 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3403 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3404 } else if (cmd_match(page, "reshape")) {
3406 if (mddev->pers->start_reshape == NULL)
3408 err = mddev->pers->start_reshape(mddev);
3411 sysfs_notify(&mddev->kobj, NULL, "degraded");
3413 if (cmd_match(page, "check"))
3414 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3415 else if (!cmd_match(page, "repair"))
3417 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3418 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3420 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3421 md_wakeup_thread(mddev->thread);
3422 sysfs_notify_dirent(mddev->sysfs_action);
3427 mismatch_cnt_show(mddev_t *mddev, char *page)
3429 return sprintf(page, "%llu\n",
3430 (unsigned long long) mddev->resync_mismatches);
3433 static struct md_sysfs_entry md_scan_mode =
3434 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
3437 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
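/*
 * Illustrative sketch (md0 is an assumed name): requesting a read-only
 * scrub, then reading the result once it completes:
 *   echo check > /sys/block/md0/md/sync_action
 *   cat /sys/block/md0/md/mismatch_cnt
 */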
3440 sync_min_show(mddev_t *mddev, char *page)
3442 return sprintf(page, "%d (%s)\n", speed_min(mddev),
3443 mddev->sync_speed_min ? "local": "system");
3447 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3451 if (strncmp(buf, "system", 6)==0) {
3452 mddev->sync_speed_min = 0;
3455 min = simple_strtoul(buf, &e, 10);
3456 if (buf == e || (*e && *e != '\n') || min <= 0)
3458 mddev->sync_speed_min = min;
3462 static struct md_sysfs_entry md_sync_min =
3463 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3466 sync_max_show(mddev_t *mddev, char *page)
3468 return sprintf(page, "%d (%s)\n", speed_max(mddev),
3469 mddev->sync_speed_max ? "local": "system");
3473 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3477 if (strncmp(buf, "system", 6)==0) {
3478 mddev->sync_speed_max = 0;
3481 max = simple_strtoul(buf, &e, 10);
3482 if (buf == e || (*e && *e != '\n') || max <= 0)
3484 mddev->sync_speed_max = max;
3488 static struct md_sysfs_entry md_sync_max =
3489 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
3492 degraded_show(mddev_t *mddev, char *page)
3494 return sprintf(page, "%d\n", mddev->degraded);
3496 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3499 sync_force_parallel_show(mddev_t *mddev, char *page)
3501 return sprintf(page, "%d\n", mddev->parallel_resync);
3505 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3509 if (strict_strtol(buf, 10, &n))
3512 if (n != 0 && n != 1)
3515 mddev->parallel_resync = n;
3517 if (mddev->sync_thread)
3518 wake_up(&resync_wait);
3523 /* force parallel resync, even with shared block devices */
3524 static struct md_sysfs_entry md_sync_force_parallel =
3525 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3526 sync_force_parallel_show, sync_force_parallel_store);
3529 sync_speed_show(mddev_t *mddev, char *page)
3531 unsigned long resync, dt, db;
3532 if (mddev->curr_resync == 0)
3533 return sprintf(page, "none\n");
3534 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3535 dt = (jiffies - mddev->resync_mark) / HZ;
3537 db = resync - mddev->resync_mark_cnt;
3538 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
3541 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
3544 sync_completed_show(mddev_t *mddev, char *page)
3546 unsigned long max_sectors, resync;
3548 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3549 return sprintf(page, "none\n");
3551 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3552 max_sectors = mddev->resync_max_sectors;
3554 max_sectors = mddev->dev_sectors;
3556 resync = mddev->curr_resync_completed;
3557 return sprintf(page, "%lu / %lu\n", resync, max_sectors);
3560 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3563 min_sync_show(mddev_t *mddev, char *page)
3565 return sprintf(page, "%llu\n",
3566 (unsigned long long)mddev->resync_min);
3569 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3571 unsigned long long min;
3572 if (strict_strtoull(buf, 10, &min))
3574 if (min > mddev->resync_max)
3576 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3579 /* Must be a multiple of chunk_size */
3580 if (mddev->chunk_sectors) {
3581 sector_t temp = min;
3582 if (sector_div(temp, mddev->chunk_sectors))
3585 mddev->resync_min = min;
3590 static struct md_sysfs_entry md_min_sync =
3591 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3594 max_sync_show(mddev_t *mddev, char *page)
3596 if (mddev->resync_max == MaxSector)
3597 return sprintf(page, "max\n");
3599 return sprintf(page, "%llu\n",
3600 (unsigned long long)mddev->resync_max);
3603 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3605 if (strncmp(buf, "max", 3) == 0)
3606 mddev->resync_max = MaxSector;
3608 unsigned long long max;
3609 if (strict_strtoull(buf, 10, &max))
3611 if (max < mddev->resync_min)
3613 if (max < mddev->resync_max &&
3615 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3618 /* Must be a multiple of chunk_size */
3619 if (mddev->chunk_sectors) {
3620 sector_t temp = max;
3621 if (sector_div(temp, mddev->chunk_sectors))
3624 mddev->resync_max = max;
3626 wake_up(&mddev->recovery_wait);
3630 static struct md_sysfs_entry md_max_sync =
3631 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
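/*
 * Illustrative sketch (md0 is an assumed name; values are sectors and must
 * be chunk-aligned): bounding a resync window, then lifting the bound:
 *   echo 2097152 > /sys/block/md0/md/sync_max
 *   echo max     > /sys/block/md0/md/sync_max
 */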
3634 suspend_lo_show(mddev_t *mddev, char *page)
3636 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3640 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3643 unsigned long long new = simple_strtoull(buf, &e, 10);
3645 if (mddev->pers == NULL ||
3646 mddev->pers->quiesce == NULL)
3648 if (buf == e || (*e && *e != '\n'))
3650 if (new >= mddev->suspend_hi ||
3651 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3652 mddev->suspend_lo = new;
3653 mddev->pers->quiesce(mddev, 2);
3658 static struct md_sysfs_entry md_suspend_lo =
3659 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3663 suspend_hi_show(mddev_t *mddev, char *page)
3665 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3669 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3672 unsigned long long new = simple_strtoull(buf, &e, 10);
3674 if (mddev->pers == NULL ||
3675 mddev->pers->quiesce == NULL)
3677 if (buf == e || (*e && *e != '\n'))
3679 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3680 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3681 mddev->suspend_hi = new;
3682 mddev->pers->quiesce(mddev, 1);
3683 mddev->pers->quiesce(mddev, 0);
3688 static struct md_sysfs_entry md_suspend_hi =
3689 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
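/*
 * Illustrative sketch (md0 is an assumed name; values are sectors):
 * suspending IO to the region [0, 8192), then emptying the region again:
 *   echo 8192 > /sys/block/md0/md/suspend_hi
 *   echo 8192 > /sys/block/md0/md/suspend_lo
 */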
3692 reshape_position_show(mddev_t *mddev, char *page)
3694 if (mddev->reshape_position != MaxSector)
3695 return sprintf(page, "%llu\n",
3696 (unsigned long long)mddev->reshape_position);
3697 strcpy(page, "none\n");
3702 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3705 unsigned long long new = simple_strtoull(buf, &e, 10);
3708 if (buf == e || (*e && *e != '\n'))
3710 mddev->reshape_position = new;
3711 mddev->delta_disks = 0;
3712 mddev->new_level = mddev->level;
3713 mddev->new_layout = mddev->layout;
3714 mddev->new_chunk_sectors = mddev->chunk_sectors;
3718 static struct md_sysfs_entry md_reshape_position =
3719 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3720 reshape_position_store);
3723 array_size_show(mddev_t *mddev, char *page)
3725 if (mddev->external_size)
3726 return sprintf(page, "%llu\n",
3727 (unsigned long long)mddev->array_sectors/2);
3729 return sprintf(page, "default\n");
3733 array_size_store(mddev_t *mddev, const char *buf, size_t len)
3737 if (strncmp(buf, "default", 7) == 0) {
3739 sectors = mddev->pers->size(mddev, 0, 0);
3741 sectors = mddev->array_sectors;
3743 mddev->external_size = 0;
3745 if (strict_blocks_to_sectors(buf, &sectors) < 0)
3747 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
3750 mddev->external_size = 1;
3753 mddev->array_sectors = sectors;
3754 set_capacity(mddev->gendisk, mddev->array_sectors);
3756 revalidate_disk(mddev->gendisk);
3761 static struct md_sysfs_entry md_array_size =
3762 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
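/*
 * Illustrative sketch (md0 is an assumed name; the value is in 1K blocks):
 * capping the exported array size, or reverting to the computed size:
 *   echo 1048576 > /sys/block/md0/md/array_size
 *   echo default > /sys/block/md0/md/array_size
 */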
3765 static struct attribute *md_default_attrs[] = {
3768 &md_raid_disks.attr,
3769 &md_chunk_size.attr,
3771 &md_resync_start.attr,
3773 &md_new_device.attr,
3774 &md_safe_delay.attr,
3775 &md_array_state.attr,
3776 &md_reshape_position.attr,
3777 &md_array_size.attr,
3781 static struct attribute *md_redundancy_attrs[] = {
3783 &md_mismatches.attr,
3786 &md_sync_speed.attr,
3787 &md_sync_force_parallel.attr,
3788 &md_sync_completed.attr,
3791 &md_suspend_lo.attr,
3792 &md_suspend_hi.attr,
3797 static struct attribute_group md_redundancy_group = {
3799 .attrs = md_redundancy_attrs,
3804 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3806 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3807 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3812 rv = mddev_lock(mddev);
3814 rv = entry->show(mddev, page);
3815 mddev_unlock(mddev);
3821 md_attr_store(struct kobject *kobj, struct attribute *attr,
3822 const char *page, size_t length)
3824 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3825 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3830 if (!capable(CAP_SYS_ADMIN))
3832 rv = mddev_lock(mddev);
3833 if (mddev->hold_active == UNTIL_IOCTL)
3834 mddev->hold_active = 0;
3836 rv = entry->store(mddev, page, length);
3837 mddev_unlock(mddev);
3842 static void md_free(struct kobject *ko)
3844 mddev_t *mddev = container_of(ko, mddev_t, kobj);
3846 if (mddev->sysfs_state)
3847 sysfs_put(mddev->sysfs_state);
3849 if (mddev->gendisk) {
3850 del_gendisk(mddev->gendisk);
3851 put_disk(mddev->gendisk);
3854 blk_cleanup_queue(mddev->queue);
3859 static struct sysfs_ops md_sysfs_ops = {
3860 .show = md_attr_show,
3861 .store = md_attr_store,
3863 static struct kobj_type md_ktype = {
3865 .sysfs_ops = &md_sysfs_ops,
3866 .default_attrs = md_default_attrs,
3871 static void mddev_delayed_delete(struct work_struct *ws)
3873 mddev_t *mddev = container_of(ws, mddev_t, del_work);
3875 if (mddev->private == &md_redundancy_group) {
3876 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3877 if (mddev->sysfs_action)
3878 sysfs_put(mddev->sysfs_action);
3879 mddev->sysfs_action = NULL;
3880 mddev->private = NULL;
3882 kobject_del(&mddev->kobj);
3883 kobject_put(&mddev->kobj);
3886 static int md_alloc(dev_t dev, char *name)
3888 static DEFINE_MUTEX(disks_mutex);
3889 mddev_t *mddev = mddev_find(dev);
3890 struct gendisk *disk;
3899 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
3900 shift = partitioned ? MdpMinorShift : 0;
3901 unit = MINOR(mddev->unit) >> shift;
3903 /* wait for any previous instance of this device
3904 * to be completely removed (mddev_delayed_delete).
3906 flush_scheduled_work();
3908 mutex_lock(&disks_mutex);
3914 /* Need to ensure that 'name' is not a duplicate.
3917 spin_lock(&all_mddevs_lock);
3919 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
3920 if (mddev2->gendisk &&
3921 strcmp(mddev2->gendisk->disk_name, name) == 0) {
3922 spin_unlock(&all_mddevs_lock);
3925 spin_unlock(&all_mddevs_lock);
3929 mddev->queue = blk_alloc_queue(GFP_KERNEL);
3932 mddev->queue->queuedata = mddev;
3934 /* Can be unlocked because the queue is new: no concurrency */
3935 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
3937 blk_queue_make_request(mddev->queue, md_make_request);
3939 disk = alloc_disk(1 << shift);
3941 blk_cleanup_queue(mddev->queue);
3942 mddev->queue = NULL;
3945 disk->major = MAJOR(mddev->unit);
3946 disk->first_minor = unit << shift;
3948 strcpy(disk->disk_name, name);
3949 else if (partitioned)
3950 sprintf(disk->disk_name, "md_d%d", unit);
3952 sprintf(disk->disk_name, "md%d", unit);
3953 disk->fops = &md_fops;
3954 disk->private_data = mddev;
3955 disk->queue = mddev->queue;
3956 /* Allow extended partitions. This makes the
3957 * 'mdp' device redundant, but we can't really
3960 disk->flags |= GENHD_FL_EXT_DEVT;
3962 mddev->gendisk = disk;
3963 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
3964 &disk_to_dev(disk)->kobj, "%s", "md");
3966 /* This isn't possible, but as kobject_init_and_add is marked
3967 * __must_check, we must do something with the result
3969 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3974 mutex_unlock(&disks_mutex);
3976 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3977 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
3983 static struct kobject *md_probe(dev_t dev, int *part, void *data)
3985 md_alloc(dev, NULL);
3989 static int add_named_array(const char *val, struct kernel_param *kp)
3991 /* val must be "md_*" where * is not all digits.
3992 * We allocate an array with a large free minor number, and
3993 * set the name to val. val must not already be an active name.
3995 int len = strlen(val);
3996 char buf[DISK_NAME_LEN];
3998 while (len && val[len-1] == '\n')
4000 if (len >= DISK_NAME_LEN)
4002 strlcpy(buf, val, len+1);
4003 if (strncmp(buf, "md_", 3) != 0)
4005 return md_alloc(0, buf);
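/*
 * Illustrative sketch (assuming the driver is loaded as md_mod): this is
 * the handler behind the "new_array" module parameter, so a named array
 * can be created with:
 *   echo md_home > /sys/module/md_mod/parameters/new_array
 */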
4008 static void md_safemode_timeout(unsigned long data)
4010 mddev_t *mddev = (mddev_t *) data;
4012 if (!atomic_read(&mddev->writes_pending)) {
4013 mddev->safemode = 1;
4014 if (mddev->external)
4015 sysfs_notify_dirent(mddev->sysfs_state);
4017 md_wakeup_thread(mddev->thread);
4020 static int start_dirty_degraded;
4022 static int do_md_run(mddev_t * mddev)
4026 struct gendisk *disk;
4027 struct mdk_personality *pers;
4029 if (list_empty(&mddev->disks))
4030 /* cannot run an array with no devices.. */
4037 * Analyze all RAID superblock(s)
4039 if (!mddev->raid_disks) {
4040 if (!mddev->persistent)
4045 if (mddev->level != LEVEL_NONE)
4046 request_module("md-level-%d", mddev->level);
4047 else if (mddev->clevel[0])
4048 request_module("md-%s", mddev->clevel);
4051 * Drop all container device buffers, from now on
4052 * the only valid external interface is through the md
4055 list_for_each_entry(rdev, &mddev->disks, same_set) {
4056 if (test_bit(Faulty, &rdev->flags))
4058 sync_blockdev(rdev->bdev);
4059 invalidate_bdev(rdev->bdev);
4061 /* perform some consistency tests on the device.
4062 * We don't want the data to overlap the metadata;
4063 * internal bitmap issues have been handled elsewhere.
4065 if (rdev->data_offset < rdev->sb_start) {
4066 if (mddev->dev_sectors &&
4067 rdev->data_offset + mddev->dev_sectors
4069 printk("md: %s: data overlaps metadata\n",
4074 if (rdev->sb_start + rdev->sb_size/512
4075 > rdev->data_offset) {
4076 printk("md: %s: metadata overlaps data\n",
4081 sysfs_notify_dirent(rdev->sysfs_state);
4084 md_probe(mddev->unit, NULL, NULL);
4085 disk = mddev->gendisk;
4089 spin_lock(&pers_lock);
4090 pers = find_pers(mddev->level, mddev->clevel);
4091 if (!pers || !try_module_get(pers->owner)) {
4092 spin_unlock(&pers_lock);
4093 if (mddev->level != LEVEL_NONE)
4094 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
4097 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
4102 spin_unlock(&pers_lock);
4103 if (mddev->level != pers->level) {
4104 mddev->level = pers->level;
4105 mddev->new_level = pers->level;
4107 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4109 if (mddev->reshape_position != MaxSector &&
4110 pers->start_reshape == NULL) {
4111 /* This personality cannot handle reshaping... */
4113 module_put(pers->owner);
4117 if (pers->sync_request) {
4118 /* Warn if this is a potentially silly
4121 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4125 list_for_each_entry(rdev, &mddev->disks, same_set)
4126 list_for_each_entry(rdev2, &mddev->disks, same_set) {
4128 rdev->bdev->bd_contains ==
4129 rdev2->bdev->bd_contains) {
4131 "%s: WARNING: %s appears to be"
4132 " on the same physical disk as"
4135 bdevname(rdev->bdev,b),
4136 bdevname(rdev2->bdev,b2));
4143 "True protection against single-disk"
4144 " failure might be compromised.\n");
4147 mddev->recovery = 0;
4148 /* may be over-ridden by personality */
4149 mddev->resync_max_sectors = mddev->dev_sectors;
4151 mddev->barriers_work = 1;
4152 mddev->ok_start_degraded = start_dirty_degraded;
4155 mddev->ro = 2; /* read-only, but switch on first write */
4157 err = mddev->pers->run(mddev);
4159 printk(KERN_ERR "md: pers->run() failed ...\n");
4160 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
4161 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4162 " but 'external_size' not in effect?\n", __func__);
4164 "md: invalid array_size %llu > default size %llu\n",
4165 (unsigned long long)mddev->array_sectors / 2,
4166 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
4168 mddev->pers->stop(mddev);
4170 if (err == 0 && mddev->pers->sync_request) {
4171 err = bitmap_create(mddev);
4173 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4174 mdname(mddev), err);
4175 mddev->pers->stop(mddev);
4179 module_put(mddev->pers->owner);
4181 bitmap_destroy(mddev);
4184 if (mddev->pers->sync_request) {
4185 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4187 "md: cannot register extra attributes for %s\n",
4189 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4190 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
4193 atomic_set(&mddev->writes_pending,0);
4194 mddev->safemode = 0;
4195 mddev->safemode_timer.function = md_safemode_timeout;
4196 mddev->safemode_timer.data = (unsigned long) mddev;
4197 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4200 list_for_each_entry(rdev, &mddev->disks, same_set)
4201 if (rdev->raid_disk >= 0) {
4203 sprintf(nm, "rd%d", rdev->raid_disk);
4204 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4205 printk("md: cannot register %s for %s\n",
4209 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4212 md_update_sb(mddev, 0);
4214 set_capacity(disk, mddev->array_sectors);
4216 /* If there is a partially-recovered drive we need to
4217 * start recovery here. If we leave it to md_check_recovery,
4218 * it will remove the drives and not do the right thing
4220 if (mddev->degraded && !mddev->sync_thread) {
4222 list_for_each_entry(rdev, &mddev->disks, same_set)
4223 if (rdev->raid_disk >= 0 &&
4224 !test_bit(In_sync, &rdev->flags) &&
4225 !test_bit(Faulty, &rdev->flags))
4226 /* complete an interrupted recovery */
4228 if (spares && mddev->pers->sync_request) {
4229 mddev->recovery = 0;
4230 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4231 mddev->sync_thread = md_register_thread(md_do_sync,
4234 if (!mddev->sync_thread) {
4235 printk(KERN_ERR "%s: could not start resync"
4238 /* leave the spares where they are, it shouldn't hurt */
4239 mddev->recovery = 0;
4243 md_wakeup_thread(mddev->thread);
4244 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4246 revalidate_disk(mddev->gendisk);
4248 md_new_event(mddev);
4249 sysfs_notify_dirent(mddev->sysfs_state);
4250 if (mddev->sysfs_action)
4251 sysfs_notify_dirent(mddev->sysfs_action);
4252 sysfs_notify(&mddev->kobj, NULL, "degraded");
4253 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4257 static int restart_array(mddev_t *mddev)
4259 struct gendisk *disk = mddev->gendisk;
4261 /* Complain if it has no devices */
4262 if (list_empty(&mddev->disks))
4268 mddev->safemode = 0;
4270 set_disk_ro(disk, 0);
4271 printk(KERN_INFO "md: %s switched to read-write mode.\n",
4273 /* Kick recovery or resync if necessary */
4274 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4275 md_wakeup_thread(mddev->thread);
4276 md_wakeup_thread(mddev->sync_thread);
4277 sysfs_notify_dirent(mddev->sysfs_state);
4281 /* similar to deny_write_access, but accounts for our holding a reference
4282 * to the file ourselves */
4283 static int deny_bitmap_write_access(struct file * file)
4285 struct inode *inode = file->f_mapping->host;
4287 spin_lock(&inode->i_lock);
4288 if (atomic_read(&inode->i_writecount) > 1) {
4289 spin_unlock(&inode->i_lock);
4292 atomic_set(&inode->i_writecount, -1);
4293 spin_unlock(&inode->i_lock);
4298 static void restore_bitmap_write_access(struct file *file)
4300 struct inode *inode = file->f_mapping->host;
4302 spin_lock(&inode->i_lock);
4303 atomic_set(&inode->i_writecount, 1);
4304 spin_unlock(&inode->i_lock);
4308 * 0 - completely stop and disassemble array
4309 * 1 - switch to readonly
4310 * 2 - stop but do not disassemble array
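 *
 * (As used elsewhere in this file: the STOP_ARRAY ioctl passes mode 0,
 * STOP_ARRAY_RO passes mode 1, and the array_state 'inactive' transition
 * above passes mode 2.)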
4312 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4315 struct gendisk *disk = mddev->gendisk;
4318 mutex_lock(&mddev->open_mutex);
4319 if (atomic_read(&mddev->openers) > is_open) {
4320 printk("md: %s still in use.\n",mdname(mddev));
4322 } else if (mddev->pers) {
4324 if (mddev->sync_thread) {
4325 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4326 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4327 md_unregister_thread(mddev->sync_thread);
4328 mddev->sync_thread = NULL;
4331 del_timer_sync(&mddev->safemode_timer);
4334 case 1: /* readonly */
4340 case 0: /* disassemble */
4342 bitmap_flush(mddev);
4343 md_super_wait(mddev);
4345 set_disk_ro(disk, 0);
4347 mddev->pers->stop(mddev);
4348 mddev->queue->merge_bvec_fn = NULL;
4349 mddev->queue->unplug_fn = NULL;
4350 mddev->queue->backing_dev_info.congested_fn = NULL;
4351 module_put(mddev->pers->owner);
4352 if (mddev->pers->sync_request)
4353 mddev->private = &md_redundancy_group;
4355 /* tell userspace to handle 'inactive' */
4356 sysfs_notify_dirent(mddev->sysfs_state);
4358 list_for_each_entry(rdev, &mddev->disks, same_set)
4359 if (rdev->raid_disk >= 0) {
4361 sprintf(nm, "rd%d", rdev->raid_disk);
4362 sysfs_remove_link(&mddev->kobj, nm);
4365 set_capacity(disk, 0);
4371 if (!mddev->in_sync || mddev->flags) {
4372 /* mark array as shutdown cleanly */
4374 md_update_sb(mddev, 1);
4377 set_disk_ro(disk, 1);
4378 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4382 mutex_unlock(&mddev->open_mutex);
4386 * Free resources if final stop
4390 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4392 bitmap_destroy(mddev);
4393 if (mddev->bitmap_file) {
4394 restore_bitmap_write_access(mddev->bitmap_file);
4395 fput(mddev->bitmap_file);
4396 mddev->bitmap_file = NULL;
4398 mddev->bitmap_offset = 0;
4400 /* make sure all md_delayed_delete calls have finished */
4401 flush_scheduled_work();
4403 export_array(mddev);
4405 mddev->array_sectors = 0;
4406 mddev->external_size = 0;
4407 mddev->dev_sectors = 0;
4408 mddev->raid_disks = 0;
4409 mddev->recovery_cp = 0;
4410 mddev->resync_min = 0;
4411 mddev->resync_max = MaxSector;
4412 mddev->reshape_position = MaxSector;
4413 mddev->external = 0;
4414 mddev->persistent = 0;
4415 mddev->level = LEVEL_NONE;
4416 mddev->clevel[0] = 0;
4419 mddev->metadata_type[0] = 0;
4420 mddev->chunk_sectors = 0;
4421 mddev->ctime = mddev->utime = 0;
4423 mddev->max_disks = 0;
4425 mddev->delta_disks = 0;
4426 mddev->new_level = LEVEL_NONE;
4427 mddev->new_layout = 0;
4428 mddev->new_chunk_sectors = 0;
4429 mddev->curr_resync = 0;
4430 mddev->resync_mismatches = 0;
4431 mddev->suspend_lo = mddev->suspend_hi = 0;
4432 mddev->sync_speed_min = mddev->sync_speed_max = 0;
4433 mddev->recovery = 0;
4436 mddev->degraded = 0;
4437 mddev->barriers_work = 0;
4438 mddev->safemode = 0;
4439 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4440 if (mddev->hold_active == UNTIL_STOP)
4441 mddev->hold_active = 0;
4443 } else if (mddev->pers)
4444 printk(KERN_INFO "md: %s switched to read-only mode.\n",
4447 blk_integrity_unregister(disk);
4448 md_new_event(mddev);
4449 sysfs_notify_dirent(mddev->sysfs_state);
4454 static void autorun_array(mddev_t *mddev)
4459 if (list_empty(&mddev->disks))
4462 printk(KERN_INFO "md: running: ");
4464 list_for_each_entry(rdev, &mddev->disks, same_set) {
4465 char b[BDEVNAME_SIZE];
4466 printk("<%s>", bdevname(rdev->bdev,b));
4470 err = do_md_run(mddev);
4472 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4473 do_md_stop(mddev, 0, 0);
4478 * let's try to run arrays based on all disks that have arrived
4479 * until now. (those are in pending_raid_disks)
4481 * the method: pick the first pending disk, collect all disks with
4482 * the same UUID, remove all from the pending list and put them into
4483 * the 'same_array' list. Then order this list based on superblock
4484 * update time (freshest comes first), kick out 'old' disks and
4485 * compare superblocks. If everything's fine then run it.
4487 * If "unit" is allocated, then bump its reference count
4489 static void autorun_devices(int part)
4491 mdk_rdev_t *rdev0, *rdev, *tmp;
4493 char b[BDEVNAME_SIZE];
4495 printk(KERN_INFO "md: autorun ...\n");
4496 while (!list_empty(&pending_raid_disks)) {
4499 LIST_HEAD(candidates);
4500 rdev0 = list_entry(pending_raid_disks.next,
4501 mdk_rdev_t, same_set);
4503 printk(KERN_INFO "md: considering %s ...\n",
4504 bdevname(rdev0->bdev,b));
4505 INIT_LIST_HEAD(&candidates);
4506 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
4507 if (super_90_load(rdev, rdev0, 0) >= 0) {
4508 printk(KERN_INFO "md: adding %s ...\n",
4509 bdevname(rdev->bdev,b));
4510 list_move(&rdev->same_set, &candidates);
4513 * now we have a set of devices, with all of them having
4514 * mostly sane superblocks. It's time to allocate the
4518 dev = MKDEV(mdp_major,
4519 rdev0->preferred_minor << MdpMinorShift);
4520 unit = MINOR(dev) >> MdpMinorShift;
4522 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4525 if (rdev0->preferred_minor != unit) {
4526 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4527 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4531 md_probe(dev, NULL, NULL);
4532 mddev = mddev_find(dev);
4533 if (!mddev || !mddev->gendisk) {
4537 "md: cannot allocate memory for md drive.\n");
4540 if (mddev_lock(mddev))
4541 printk(KERN_WARNING "md: %s locked, cannot run\n",
4543 else if (mddev->raid_disks || mddev->major_version
4544 || !list_empty(&mddev->disks)) {
4546 "md: %s already running, cannot run %s\n",
4547 mdname(mddev), bdevname(rdev0->bdev,b));
4548 mddev_unlock(mddev);
4550 printk(KERN_INFO "md: created %s\n", mdname(mddev));
4551 mddev->persistent = 1;
4552 rdev_for_each_list(rdev, tmp, &candidates) {
4553 list_del_init(&rdev->same_set);
4554 if (bind_rdev_to_array(rdev, mddev))
4557 autorun_array(mddev);
4558 mddev_unlock(mddev);
4560 /* on success, candidates will be empty, on error
4563 rdev_for_each_list(rdev, tmp, &candidates) {
4564 list_del_init(&rdev->same_set);
4569 printk(KERN_INFO "md: ... autorun DONE.\n");
4571 #endif /* !MODULE */
4573 static int get_version(void __user * arg)
4577 ver.major = MD_MAJOR_VERSION;
4578 ver.minor = MD_MINOR_VERSION;
4579 ver.patchlevel = MD_PATCHLEVEL_VERSION;
4581 if (copy_to_user(arg, &ver, sizeof(ver)))
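/*
 * Illustrative sketch: this services the RAID_VERSION ioctl; a
 * hypothetical caller with fd open on an md device would do:
 *   mdu_version_t ver;
 *   ioctl(fd, RAID_VERSION, &ver);
 */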
4587 static int get_array_info(mddev_t * mddev, void __user * arg)
4589 mdu_array_info_t info;
4590 int nr,working,insync,failed,spare;
4593 nr=working=insync=failed=spare=0;
4594 list_for_each_entry(rdev, &mddev->disks, same_set) {
4596 if (test_bit(Faulty, &rdev->flags))
4600 if (test_bit(In_sync, &rdev->flags))
4607 info.major_version = mddev->major_version;
4608 info.minor_version = mddev->minor_version;
4609 info.patch_version = MD_PATCHLEVEL_VERSION;
4610 info.ctime = mddev->ctime;
4611 info.level = mddev->level;
4612 info.size = mddev->dev_sectors / 2;
4613 if (info.size != mddev->dev_sectors / 2) /* overflow */
4616 info.raid_disks = mddev->raid_disks;
4617 info.md_minor = mddev->md_minor;
4618 info.not_persistent= !mddev->persistent;
4620 info.utime = mddev->utime;
4623 info.state = (1<<MD_SB_CLEAN);
4624 if (mddev->bitmap && mddev->bitmap_offset)
4625 info.state |= (1<<MD_SB_BITMAP_PRESENT);
4626 info.active_disks = insync;
4627 info.working_disks = working;
4628 info.failed_disks = failed;
4629 info.spare_disks = spare;
4631 info.layout = mddev->layout;
4632 info.chunk_size = mddev->chunk_sectors << 9;
4634 if (copy_to_user(arg, &info, sizeof(info)))
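/*
 * Illustrative sketch: this backs the GET_ARRAY_INFO ioctl; a hypothetical
 * caller with fd open on the md device:
 *   mdu_array_info_t info;
 *   if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *       printf("%d raid disks\n", info.raid_disks);
 */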
4640 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4642 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4643 char *ptr, *buf = NULL;
4646 if (md_allow_write(mddev))
4647 file = kmalloc(sizeof(*file), GFP_NOIO);
4649 file = kmalloc(sizeof(*file), GFP_KERNEL);
4654 /* bitmap disabled, zero the first byte and copy out */
4655 if (!mddev->bitmap || !mddev->bitmap->file) {
4656 file->pathname[0] = '\0';
4660 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4664 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4668 strcpy(file->pathname, ptr);
4672 if (copy_to_user(arg, file, sizeof(*file)))
4680 static int get_disk_info(mddev_t * mddev, void __user * arg)
4682 mdu_disk_info_t info;
4685 if (copy_from_user(&info, arg, sizeof(info)))
4688 rdev = find_rdev_nr(mddev, info.number);
4690 info.major = MAJOR(rdev->bdev->bd_dev);
4691 info.minor = MINOR(rdev->bdev->bd_dev);
4692 info.raid_disk = rdev->raid_disk;
4694 if (test_bit(Faulty, &rdev->flags))
4695 info.state |= (1<<MD_DISK_FAULTY);
4696 else if (test_bit(In_sync, &rdev->flags)) {
4697 info.state |= (1<<MD_DISK_ACTIVE);
4698 info.state |= (1<<MD_DISK_SYNC);
4700 if (test_bit(WriteMostly, &rdev->flags))
4701 info.state |= (1<<MD_DISK_WRITEMOSTLY);
4703 info.major = info.minor = 0;
4704 info.raid_disk = -1;
4705 info.state = (1<<MD_DISK_REMOVED);
4708 if (copy_to_user(arg, &info, sizeof(info)))
4714 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4716 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4718 dev_t dev = MKDEV(info->major,info->minor);
4720 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
4723 if (!mddev->raid_disks) {
4725 /* expecting a device which has a superblock */
4726 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
4729 "md: md_import_device returned %ld\n",
4731 return PTR_ERR(rdev);
4733 if (!list_empty(&mddev->disks)) {
4734 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
4735 mdk_rdev_t, same_set);
4736 err = super_types[mddev->major_version]
4737 .load_super(rdev, rdev0, mddev->minor_version);
4740 "md: %s has different UUID to %s\n",
4741 bdevname(rdev->bdev,b),
4742 bdevname(rdev0->bdev,b2));
4747 err = bind_rdev_to_array(rdev, mddev);
4754 * add_new_disk can be used once the array is assembled
4755 * to add "hot spares". They must already have a superblock
4760 if (!mddev->pers->hot_add_disk) {
4762 "%s: personality does not support diskops!\n",
4766 if (mddev->persistent)
4767 rdev = md_import_device(dev, mddev->major_version,
4768 mddev->minor_version);
4770 rdev = md_import_device(dev, -1, -1);
4773 "md: md_import_device returned %ld\n",
4775 return PTR_ERR(rdev);
4777 /* set save_raid_disk if appropriate */
4778 if (!mddev->persistent) {
4779 if (info->state & (1<<MD_DISK_SYNC) &&
4780 info->raid_disk < mddev->raid_disks)
4781 rdev->raid_disk = info->raid_disk;
4783 rdev->raid_disk = -1;
4785 super_types[mddev->major_version].
4786 validate_super(mddev, rdev);
4787 rdev->saved_raid_disk = rdev->raid_disk;
4789 clear_bit(In_sync, &rdev->flags); /* just to be sure */
4790 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4791 set_bit(WriteMostly, &rdev->flags);
4793 clear_bit(WriteMostly, &rdev->flags);
4795 rdev->raid_disk = -1;
4796 err = bind_rdev_to_array(rdev, mddev);
4797 if (!err && !mddev->pers->hot_remove_disk) {
4798 /* If there is hot_add_disk but no hot_remove_disk
4799 * then added disks are for geometry changes,
4800 * and should be added immediately.
4802 super_types[mddev->major_version].
4803 validate_super(mddev, rdev);
4804 err = mddev->pers->hot_add_disk(mddev, rdev);
4806 unbind_rdev_from_array(rdev);
4811 sysfs_notify_dirent(rdev->sysfs_state);
4813 md_update_sb(mddev, 1);
4814 if (mddev->degraded)
4815 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4816 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4817 md_wakeup_thread(mddev->thread);
4821 /* otherwise, add_new_disk is only allowed
4822 * for major_version==0 superblocks
4824 if (mddev->major_version != 0) {
4825 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
4830 if (!(info->state & (1<<MD_DISK_FAULTY))) {
4832 rdev = md_import_device(dev, -1, 0);
4835 "md: error, md_import_device() returned %ld\n",
4837 return PTR_ERR(rdev);
4839 rdev->desc_nr = info->number;
4840 if (info->raid_disk < mddev->raid_disks)
4841 rdev->raid_disk = info->raid_disk;
4843 rdev->raid_disk = -1;
4845 if (rdev->raid_disk < mddev->raid_disks)
4846 if (info->state & (1<<MD_DISK_SYNC))
4847 set_bit(In_sync, &rdev->flags);
4849 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4850 set_bit(WriteMostly, &rdev->flags);
4852 if (!mddev->persistent) {
4853 printk(KERN_INFO "md: nonpersistent superblock ...\n");
4854 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4856 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4857 rdev->sectors = rdev->sb_start;
4859 err = bind_rdev_to_array(rdev, mddev);
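/*
 * Illustrative sketch, not part of this listing: roughly how a
 * user-space tool (mdadm-like) might drive the add_new_disk() path
 * above to hot-add a spare to an assembled array. The descriptor and
 * field values are hypothetical and error handling is elided.
 */
#if 0 /* example only */
#include <sys/ioctl.h>
#include <sys/sysmacros.h>
#include <linux/raid/md_u.h>

static int example_add_hot_spare(int md_fd, dev_t disk)
{
	mdu_disk_info_t dinfo = {
		.major = major(disk),	/* add_new_disk() re-checks the MKDEV() round-trip */
		.minor = minor(disk),
		.raid_disk = -1,	/* let the kernel choose a slot */
		.state = 0,		/* could OR in (1<<MD_DISK_WRITEMOSTLY) */
	};

	return ioctl(md_fd, ADD_NEW_DISK, &dinfo);
}
#endif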
4869 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
4871 char b[BDEVNAME_SIZE];
4874 rdev = find_rdev(mddev, dev);
4878 if (rdev->raid_disk >= 0)
4881 kick_rdev_from_array(rdev);
4882 md_update_sb(mddev, 1);
4883 md_new_event(mddev);
4887 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
4888 bdevname(rdev->bdev,b), mdname(mddev));
4892 static int hot_add_disk(mddev_t * mddev, dev_t dev)
4894 char b[BDEVNAME_SIZE];
4901 if (mddev->major_version != 0) {
4902 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
4903 " version-0 superblocks.\n",
4907 if (!mddev->pers->hot_add_disk) {
4909 "%s: personality does not support diskops!\n",
4914 rdev = md_import_device(dev, -1, 0);
4917 "md: error, md_import_device() returned %ld\n",
4922 if (mddev->persistent)
4923 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4925 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4927 rdev->sectors = rdev->sb_start;
4929 if (test_bit(Faulty, &rdev->flags)) {
4931 "md: can not hot-add faulty %s disk to %s!\n",
4932 bdevname(rdev->bdev,b), mdname(mddev));
4936 clear_bit(In_sync, &rdev->flags);
4938 rdev->saved_raid_disk = -1;
4939 err = bind_rdev_to_array(rdev, mddev);
4944 * The rest had better be atomic: we can have disk failures
4945 * noticed from interrupt context ...
4948 rdev->raid_disk = -1;
4950 md_update_sb(mddev, 1);
4953 * Kick recovery, maybe this spare has to be added to the
4954 * array immediately.
4956 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4957 md_wakeup_thread(mddev->thread);
4958 md_new_event(mddev);
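/*
 * Illustrative sketch, not part of this listing: HOT_ADD_DISK and
 * HOT_REMOVE_DISK take the encoded device number directly as the
 * ioctl argument, which new_decode_dev() unpacks before the handlers
 * above run. The path and descriptor are hypothetical.
 */
#if 0 /* example only */
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/raid/md_u.h>

static int example_hot_add(int md_fd, const char *component_path)
{
	struct stat st;

	if (stat(component_path, &st) < 0)
		return -1;
	return ioctl(md_fd, HOT_ADD_DISK, (unsigned long)st.st_rdev);
}
#endif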
4966 static int set_bitmap_file(mddev_t *mddev, int fd)
4971 if (!mddev->pers->quiesce)
4973 if (mddev->recovery || mddev->sync_thread)
4975 /* we should be able to change the bitmap... */
4981 return -EEXIST; /* cannot add when bitmap is present */
4982 mddev->bitmap_file = fget(fd);
4984 if (mddev->bitmap_file == NULL) {
4985 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
4990 err = deny_bitmap_write_access(mddev->bitmap_file);
4992 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
4994 fput(mddev->bitmap_file);
4995 mddev->bitmap_file = NULL;
4998 mddev->bitmap_offset = 0; /* file overrides offset */
4999 } else if (mddev->bitmap == NULL)
5000 return -ENOENT; /* cannot remove what isn't there */
5003 mddev->pers->quiesce(mddev, 1);
5005 err = bitmap_create(mddev);
5006 if (fd < 0 || err) {
5007 bitmap_destroy(mddev);
5008 fd = -1; /* make sure to put the file */
5010 mddev->pers->quiesce(mddev, 0);
5013 if (mddev->bitmap_file) {
5014 restore_bitmap_write_access(mddev->bitmap_file);
5015 fput(mddev->bitmap_file);
5017 mddev->bitmap_file = NULL;
5024 * set_array_info is used in two different ways.
5025 * The original usage is when creating a new array.
5026 * In this usage, raid_disks is > 0 and, together with
5027 * level, size, not_persistent, layout and chunksize, determines the
5028 * shape of the array.
5029 * This will always create an array with a type-0.90.0 superblock.
5030 * The newer usage is when assembling an array.
5031 * In this case raid_disks will be 0, and the major_version field is
5032 * used to determine which style of superblocks are to be found on the devices.
5033 * The minor and patch _version numbers are also kept in case the
5034 * super_block handler wishes to interpret them.
5036 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5039 if (info->raid_disks == 0) {
5040 /* just setting version number for superblock loading */
5041 if (info->major_version < 0 ||
5042 info->major_version >= ARRAY_SIZE(super_types) ||
5043 super_types[info->major_version].name == NULL) {
5044 /* maybe try to auto-load a module? */
5046 "md: superblock version %d not known\n",
5047 info->major_version);
5050 mddev->major_version = info->major_version;
5051 mddev->minor_version = info->minor_version;
5052 mddev->patch_version = info->patch_version;
5053 mddev->persistent = !info->not_persistent;
5056 mddev->major_version = MD_MAJOR_VERSION;
5057 mddev->minor_version = MD_MINOR_VERSION;
5058 mddev->patch_version = MD_PATCHLEVEL_VERSION;
5059 mddev->ctime = get_seconds();
5061 mddev->level = info->level;
5062 mddev->clevel[0] = 0;
5063 mddev->dev_sectors = 2 * (sector_t)info->size;
5064 mddev->raid_disks = info->raid_disks;
5065 /* don't set md_minor, it is determined by which /dev/md* was
5066 * opened. */
5068 if (info->state & (1<<MD_SB_CLEAN))
5069 mddev->recovery_cp = MaxSector;
5071 mddev->recovery_cp = 0;
5072 mddev->persistent = ! info->not_persistent;
5073 mddev->external = 0;
5075 mddev->layout = info->layout;
5076 mddev->chunk_sectors = info->chunk_size >> 9;
5078 mddev->max_disks = MD_SB_DISKS;
5080 if (mddev->persistent)
5082 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5084 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
5085 mddev->bitmap_offset = 0;
5087 mddev->reshape_position = MaxSector;
5090 * Generate a 128 bit UUID
5092 get_random_bytes(mddev->uuid, 16);
5094 mddev->new_level = mddev->level;
5095 mddev->new_chunk_sectors = mddev->chunk_sectors;
5096 mddev->new_layout = mddev->layout;
5097 mddev->delta_disks = 0;
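/*
 * Illustrative sketch, not part of this listing: the "create" usage of
 * set_array_info() described above, i.e. raid_disks > 0 so the kernel
 * shapes a fresh array with a 0.90.0 superblock. All values are
 * hypothetical and error handling is elided.
 */
#if 0 /* example only */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int example_create_raid1(int md_fd)
{
	mdu_array_info_t ainfo;

	memset(&ainfo, 0, sizeof(ainfo));
	ainfo.level = 1;		/* RAID1 */
	ainfo.raid_disks = 2;		/* > 0 selects the "create" usage */
	ainfo.size = 1024 * 1024;	/* KB per member device (hypothetical) */
	ainfo.chunk_size = 64 * 1024;	/* bytes; stored as chunk_sectors above */
	ainfo.state = (1 << MD_SB_CLEAN);

	return ioctl(md_fd, SET_ARRAY_INFO, &ainfo);
}
#endif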
5102 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
5104 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
5106 if (mddev->external_size)
5109 mddev->array_sectors = array_sectors;
5111 EXPORT_SYMBOL(md_set_array_sectors);
5113 static int update_size(mddev_t *mddev, sector_t num_sectors)
5117 int fit = (num_sectors == 0);
5119 if (mddev->pers->resize == NULL)
5121 /* The "num_sectors" is the number of sectors of each device that
5122 * is used. This can only make sense for arrays with redundancy.
5123 * linear and raid0 always use whatever space is available. We can only
5124 * consider changing this number if no resync or reconstruction is
5125 * happening, and if the new size is acceptable. It must fit before the
5126 * sb_start or, if that is <data_offset, it must fit before the size
5127 * of each device. If num_sectors is zero, we find the largest size
5128 * that fits.
5131 if (mddev->sync_thread)
5134 /* Sorry, cannot grow a bitmap yet, just remove it,
5135 * grow, and re-add. */
5138 list_for_each_entry(rdev, &mddev->disks, same_set) {
5139 sector_t avail = rdev->sectors;
5141 if (fit && (num_sectors == 0 || num_sectors > avail))
5142 num_sectors = avail;
5143 if (avail < num_sectors)
5146 rv = mddev->pers->resize(mddev, num_sectors);
5148 revalidate_disk(mddev->gendisk);
5152 static int update_raid_disks(mddev_t *mddev, int raid_disks)
5155 /* change the number of raid disks */
5156 if (mddev->pers->check_reshape == NULL)
5158 if (raid_disks <= 0 ||
5159 raid_disks >= mddev->max_disks)
5161 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
5163 mddev->delta_disks = raid_disks - mddev->raid_disks;
5165 rv = mddev->pers->check_reshape(mddev);
5171 * update_array_info is used to change the configuration of an
5172 * active array.
5173 * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
5174 * fields in the info are checked against the array.
5175 * Any differences that cannot be handled will cause an error.
5176 * Normally, only one change can be managed at a time.
5178 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5184 /* calculate expected state, ignoring low bits */
5185 if (mddev->bitmap && mddev->bitmap_offset)
5186 state |= (1 << MD_SB_BITMAP_PRESENT);
5188 if (mddev->major_version != info->major_version ||
5189 mddev->minor_version != info->minor_version ||
5190 /* mddev->patch_version != info->patch_version || */
5191 mddev->ctime != info->ctime ||
5192 mddev->level != info->level ||
5193 /* mddev->layout != info->layout || */
5194 !mddev->persistent != info->not_persistent||
5195 mddev->chunk_sectors != info->chunk_size >> 9 ||
5196 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5197 ((state^info->state) & 0xfffffe00)
5200 /* Check there is only one change */
5201 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5203 if (mddev->raid_disks != info->raid_disks)
5205 if (mddev->layout != info->layout)
5207 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
5214 if (mddev->layout != info->layout) {
5216 * we don't need to do anything at the md level, the
5217 * personality will take care of it all.
5219 if (mddev->pers->check_reshape == NULL)
5222 mddev->new_layout = info->layout;
5223 rv = mddev->pers->check_reshape(mddev);
5225 mddev->new_layout = mddev->layout;
5229 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5230 rv = update_size(mddev, (sector_t)info->size * 2);
5232 if (mddev->raid_disks != info->raid_disks)
5233 rv = update_raid_disks(mddev, info->raid_disks);
5235 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
5236 if (mddev->pers->quiesce == NULL)
5238 if (mddev->recovery || mddev->sync_thread)
5240 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
5241 /* add the bitmap */
5244 if (mddev->default_bitmap_offset == 0)
5246 mddev->bitmap_offset = mddev->default_bitmap_offset;
5247 mddev->pers->quiesce(mddev, 1);
5248 rv = bitmap_create(mddev);
5250 bitmap_destroy(mddev);
5251 mddev->pers->quiesce(mddev, 0);
5253 /* remove the bitmap */
5256 if (mddev->bitmap->file)
5258 mddev->pers->quiesce(mddev, 1);
5259 bitmap_destroy(mddev);
5260 mddev->pers->quiesce(mddev, 0);
5261 mddev->bitmap_offset = 0;
5264 md_update_sb(mddev, 1);
5268 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5272 if (mddev->pers == NULL)
5275 rdev = find_rdev(mddev, dev);
5279 md_error(mddev, rdev);
5284 * We have a problem here: there is no easy way to give a CHS
5285 * virtual geometry. We currently pretend that we have a 2-head,
5286 * 4-sector geometry (with a BIG number of cylinders...). This drives
5287 * dosfs just mad... ;-)
5289 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
5291 mddev_t *mddev = bdev->bd_disk->private_data;
5295 geo->cylinders = get_capacity(mddev->gendisk) / 8;
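/*
 * Illustrative sketch, not part of this listing: reading the fake
 * geometry back from user space. Per the comment above, heads/sectors
 * come back as 2/4, so cylinders is the capacity divided by 8.
 */
#if 0 /* example only */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

static void example_print_geometry(int md_fd)
{
	struct hd_geometry geo;

	if (ioctl(md_fd, HDIO_GETGEO, &geo) == 0)
		printf("C/H/S = %d/%d/%d\n",
		       geo.cylinders, geo.heads, geo.sectors);
}
#endif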
5299 static int md_ioctl(struct block_device *bdev, fmode_t mode,
5300 unsigned int cmd, unsigned long arg)
5303 void __user *argp = (void __user *)arg;
5304 mddev_t *mddev = NULL;
5306 if (!capable(CAP_SYS_ADMIN))
5310 * Commands dealing with the RAID driver but not any
5311 * particular array:
5316 err = get_version(argp);
5319 case PRINT_RAID_DEBUG:
5327 autostart_arrays(arg);
5334 * Commands creating/starting a new array:
5337 mddev = bdev->bd_disk->private_data;
5344 err = mddev_lock(mddev);
5347 "md: ioctl lock interrupted, reason %d, cmd %d\n",
5354 case SET_ARRAY_INFO:
5356 mdu_array_info_t info;
5357 if (!arg)
5358 memset(&info, 0, sizeof(info));
5359 else if (copy_from_user(&info, argp, sizeof(info))) {
5364 err = update_array_info(mddev, &info);
5366 printk(KERN_WARNING "md: couldn't update"
5367 " array info. %d\n", err);
5372 if (!list_empty(&mddev->disks)) {
5374 "md: array %s already has disks!\n",
5379 if (mddev->raid_disks) {
5381 "md: array %s already initialised!\n",
5386 err = set_array_info(mddev, &info);
5388 printk(KERN_WARNING "md: couldn't set"
5389 " array info. %d\n", err);
5399 * Commands querying/configuring an existing array:
5401 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
5402 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
5403 if ((!mddev->raid_disks && !mddev->external)
5404 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
5405 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
5406 && cmd != GET_BITMAP_FILE) {
5412 * Commands even a read-only array can execute:
5416 case GET_ARRAY_INFO:
5417 err = get_array_info(mddev, argp);
5420 case GET_BITMAP_FILE:
5421 err = get_bitmap_file(mddev, argp);
5425 err = get_disk_info(mddev, argp);
5428 case RESTART_ARRAY_RW:
5429 err = restart_array(mddev);
5433 err = do_md_stop(mddev, 0, 1);
5437 err = do_md_stop(mddev, 1, 1);
5443 * The remaining ioctls are changing the state of the
5444 * superblock, so we do not allow them on read-only arrays.
5445 * However non-MD ioctls (e.g. get-size) will still come through
5446 * here and hit the 'default' below, so only disallow
5447 * 'md' ioctls, and switch to rw mode if started auto-readonly.
5449 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
5450 if (mddev->ro == 2) {
5452 sysfs_notify_dirent(mddev->sysfs_state);
5453 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5454 md_wakeup_thread(mddev->thread);
5465 mdu_disk_info_t info;
5466 if (copy_from_user(&info, argp, sizeof(info)))
5469 err = add_new_disk(mddev, &info);
5473 case HOT_REMOVE_DISK:
5474 err = hot_remove_disk(mddev, new_decode_dev(arg));
5478 err = hot_add_disk(mddev, new_decode_dev(arg));
5481 case SET_DISK_FAULTY:
5482 err = set_disk_faulty(mddev, new_decode_dev(arg));
5486 err = do_md_run(mddev);
5489 case SET_BITMAP_FILE:
5490 err = set_bitmap_file(mddev, (int)arg);
5500 if (mddev->hold_active == UNTIL_IOCTL &&
5502 mddev->hold_active = 0;
5503 mddev_unlock(mddev);
5513 static int md_open(struct block_device *bdev, fmode_t mode)
5516 * Succeed if we can lock the mddev, which confirms that
5517 * it isn't being stopped right now.
5519 mddev_t *mddev = mddev_find(bdev->bd_dev);
5522 if (mddev->gendisk != bdev->bd_disk) {
5523 /* we are racing with mddev_put which is discarding this
5524 * bd_disk. */
5527 /* Wait until bdev->bd_disk is definitely gone */
5528 flush_scheduled_work();
5529 /* Then retry the open from the top */
5530 return -ERESTARTSYS;
5532 BUG_ON(mddev != bdev->bd_disk->private_data);
5534 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
5538 atomic_inc(&mddev->openers);
5539 mutex_unlock(&mddev->open_mutex);
5541 check_disk_change(bdev);
5546 static int md_release(struct gendisk *disk, fmode_t mode)
5548 mddev_t *mddev = disk->private_data;
5551 atomic_dec(&mddev->openers);
5557 static int md_media_changed(struct gendisk *disk)
5559 mddev_t *mddev = disk->private_data;
5561 return mddev->changed;
5564 static int md_revalidate(struct gendisk *disk)
5566 mddev_t *mddev = disk->private_data;
5571 static const struct block_device_operations md_fops =
5573 .owner = THIS_MODULE,
5575 .release = md_release,
5577 .getgeo = md_getgeo,
5578 .media_changed = md_media_changed,
5579 .revalidate_disk= md_revalidate,
5582 static int md_thread(void * arg)
5584 mdk_thread_t *thread = arg;
5587 * md_thread is a 'system-thread', its priority should be very
5588 * high. We avoid resource deadlocks individually in each
5589 * raid personality. (RAID5 does preallocation) We also use RR and
5590 * the very same RT priority as kswapd, thus we will never get
5591 * into a priority inversion deadlock.
5593 * we definitely have to have equal or higher priority than
5594 * bdflush, otherwise bdflush will deadlock if there are too
5595 * many dirty RAID5 blocks.
5598 allow_signal(SIGKILL);
5599 while (!kthread_should_stop()) {
5601 /* We need to wait INTERRUPTIBLE so that
5602 * we don't add to the load-average.
5603 * That means we need to be sure no signals are
5604 * pending. */
5606 if (signal_pending(current))
5607 flush_signals(current);
5609 wait_event_interruptible_timeout
5611 test_bit(THREAD_WAKEUP, &thread->flags)
5612 || kthread_should_stop(),
5615 clear_bit(THREAD_WAKEUP, &thread->flags);
5617 thread->run(thread->mddev);
5623 void md_wakeup_thread(mdk_thread_t *thread)
5626 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
5627 set_bit(THREAD_WAKEUP, &thread->flags);
5628 wake_up(&thread->wqueue);
5632 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
5635 mdk_thread_t *thread;
5637 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
5641 init_waitqueue_head(&thread->wqueue);
5644 thread->mddev = mddev;
5645 thread->timeout = MAX_SCHEDULE_TIMEOUT;
5646 thread->tsk = kthread_run(md_thread, thread,
5648 mdname(thread->mddev),
5649 name ?: mddev->pers->name);
5650 if (IS_ERR(thread->tsk)) {
5657 void md_unregister_thread(mdk_thread_t *thread)
5661 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
5663 kthread_stop(thread->tsk);
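/*
 * Illustrative sketch, not part of this listing: how a personality
 * typically uses the thread helpers above (compare raid1's raid1d).
 * All names are hypothetical.
 */
#if 0 /* example only */
static void exampled(mddev_t *mddev)
{
	/* called from md_thread() whenever THREAD_WAKEUP is set */
}

static int example_run(mddev_t *mddev)
{
	/* NULL name: md_register_thread() falls back to pers->name */
	mddev->thread = md_register_thread(exampled, mddev, NULL);
	return mddev->thread ? 0 : -ENOMEM;
}

static int example_stop(mddev_t *mddev)
{
	md_unregister_thread(mddev->thread);	/* kthread_stop() + kfree() */
	mddev->thread = NULL;
	return 0;
}
#endif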
5667 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
5674 if (!rdev || test_bit(Faulty, &rdev->flags))
5677 if (mddev->external)
5678 set_bit(Blocked, &rdev->flags);
5680 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
5682 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
5683 __builtin_return_address(0),__builtin_return_address(1),
5684 __builtin_return_address(2),__builtin_return_address(3));
5688 if (!mddev->pers->error_handler)
5690 mddev->pers->error_handler(mddev,rdev);
5691 if (mddev->degraded)
5692 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5693 set_bit(StateChanged, &rdev->flags);
5694 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5695 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5696 md_wakeup_thread(mddev->thread);
5697 md_new_event_inintr(mddev);
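/*
 * Illustrative sketch, not part of this listing: a personality reports
 * a failed member write by calling md_error(), which may happen from
 * interrupt context. The context structure and names are hypothetical.
 */
#if 0 /* example only */
struct example_ctx {
	mddev_t *mddev;
	mdk_rdev_t *rdev;
};

static void example_end_write(struct bio *bio, int error)
{
	struct example_ctx *ctx = bio->bi_private;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		md_error(ctx->mddev, ctx->rdev);	/* marks rdev Faulty, kicks recovery */
}
#endif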
5700 /* seq_file implementation /proc/mdstat */
5702 static void status_unused(struct seq_file *seq)
5707 seq_printf(seq, "unused devices: ");
5709 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
5710 char b[BDEVNAME_SIZE];
5712 seq_printf(seq, "%s ",
5713 bdevname(rdev->bdev,b));
5716 seq_printf(seq, "<none>");
5718 seq_printf(seq, "\n");
5722 static void status_resync(struct seq_file *seq, mddev_t * mddev)
5724 sector_t max_sectors, resync, res;
5725 unsigned long dt, db;
5728 unsigned int per_milli;
5730 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
5732 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5733 max_sectors = mddev->resync_max_sectors;
5735 max_sectors = mddev->dev_sectors;
5738 * Should not happen.
5744 /* Pick 'scale' such that (resync>>scale)*1000 will fit
5745 * in a sector_t, and (max_sectors>>scale) will fit in a
5746 * u32, as those are the requirements for sector_div.
5747 * Thus 'scale' must be at least 10
5750 if (sizeof(sector_t) > sizeof(unsigned long)) {
5751 while ( max_sectors/2 > (1ULL<<(scale+32)))
5754 res = (resync>>scale)*1000;
5755 sector_div(res, (u32)((max_sectors>>scale)+1));
5759 int i, x = per_milli/50, y = 20-x;
5760 seq_printf(seq, "[");
5761 for (i = 0; i < x; i++)
5762 seq_printf(seq, "=");
5763 seq_printf(seq, ">");
5764 for (i = 0; i < y; i++)
5765 seq_printf(seq, ".");
5766 seq_printf(seq, "] ");
5768 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
5769 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
5771 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
5773 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
5774 "resync" : "recovery"))),
5775 per_milli/10, per_milli % 10,
5776 (unsigned long long) resync/2,
5777 (unsigned long long) max_sectors/2);
5780 * dt: time from mark until now
5781 * db: blocks written from mark until now
5782 * rt: remaining time
5784 * rt is a sector_t, so could be 32bit or 64bit.
5785 * So we divide before multiply in case it is 32bit and close
5786 * to the limit.
5787 * We scale the divisor (db) by 32 to avoid losing precision
5788 * near the end of resync when the number of remaining sectors
5789 * is close to the 'window' size.
5790 * We then divide rt by 32 after multiplying by db to compensate.
5791 * The '+1' avoids division by zero if db is very small.
5793 dt = ((jiffies - mddev->resync_mark) / HZ);
5795 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
5796 - mddev->resync_mark_cnt;
5798 rt = max_sectors - resync; /* number of remaining sectors */
5799 sector_div(rt, db/32+1);
5803 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
5804 ((unsigned long)rt % 60)/6);
5806 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
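/*
 * Illustrative sketch, not part of this listing: the remaining-time
 * estimate in isolation. The multiply-by-dt and >>5 steps fall in
 * lines elided from this listing, so this follows the recipe in the
 * comment above: dividing before multiplying keeps the intermediate
 * value small in case sector_t is 32bit.
 */
#if 0 /* example only */
static unsigned long long example_eta_seconds(unsigned long dt,
					      unsigned long long db,
					      unsigned long long remaining)
{
	unsigned long long rt = remaining;	/* sectors still to sync */

	rt /= db / 32 + 1;	/* divide first; '+1' guards against db == 0 */
	rt *= dt;		/* seconds it took to move 'db' sectors */
	rt >>= 5;		/* undo the x32 scaling of the divisor */
	return rt;
}
#endif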
5809 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
5811 struct list_head *tmp;
5821 spin_lock(&all_mddevs_lock);
5822 list_for_each(tmp,&all_mddevs)
5824 mddev = list_entry(tmp, mddev_t, all_mddevs);
5826 spin_unlock(&all_mddevs_lock);
5829 spin_unlock(&all_mddevs_lock);
5831 return (void*)2;/* tail */
5835 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5837 struct list_head *tmp;
5838 mddev_t *next_mddev, *mddev = v;
5844 spin_lock(&all_mddevs_lock);
5846 tmp = all_mddevs.next;
5848 tmp = mddev->all_mddevs.next;
5849 if (tmp != &all_mddevs)
5850 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
5852 next_mddev = (void*)2;
5855 spin_unlock(&all_mddevs_lock);
5863 static void md_seq_stop(struct seq_file *seq, void *v)
5867 if (mddev && v != (void*)1 && v != (void*)2)
5871 struct mdstat_info {
5875 static int md_seq_show(struct seq_file *seq, void *v)
5880 struct mdstat_info *mi = seq->private;
5881 struct bitmap *bitmap;
5883 if (v == (void*)1) {
5884 struct mdk_personality *pers;
5885 seq_printf(seq, "Personalities : ");
5886 spin_lock(&pers_lock);
5887 list_for_each_entry(pers, &pers_list, list)
5888 seq_printf(seq, "[%s] ", pers->name);
5890 spin_unlock(&pers_lock);
5891 seq_printf(seq, "\n");
5892 mi->event = atomic_read(&md_event_count);
5895 if (v == (void*)2) {
5900 if (mddev_lock(mddev) < 0)
5903 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
5904 seq_printf(seq, "%s : %sactive", mdname(mddev),
5905 mddev->pers ? "" : "in");
5908 seq_printf(seq, " (read-only)");
5910 seq_printf(seq, " (auto-read-only)");
5911 seq_printf(seq, " %s", mddev->pers->name);
5915 list_for_each_entry(rdev, &mddev->disks, same_set) {
5916 char b[BDEVNAME_SIZE];
5917 seq_printf(seq, " %s[%d]",
5918 bdevname(rdev->bdev,b), rdev->desc_nr);
5919 if (test_bit(WriteMostly, &rdev->flags))
5920 seq_printf(seq, "(W)");
5921 if (test_bit(Faulty, &rdev->flags)) {
5922 seq_printf(seq, "(F)");
5924 } else if (rdev->raid_disk < 0)
5925 seq_printf(seq, "(S)"); /* spare */
5926 sectors += rdev->sectors;
5929 if (!list_empty(&mddev->disks)) {
5931 seq_printf(seq, "\n %llu blocks",
5932 (unsigned long long)
5933 mddev->array_sectors / 2);
5935 seq_printf(seq, "\n %llu blocks",
5936 (unsigned long long)sectors / 2);
5938 if (mddev->persistent) {
5939 if (mddev->major_version != 0 ||
5940 mddev->minor_version != 90) {
5941 seq_printf(seq," super %d.%d",
5942 mddev->major_version,
5943 mddev->minor_version);
5945 } else if (mddev->external)
5946 seq_printf(seq, " super external:%s",
5947 mddev->metadata_type);
5949 seq_printf(seq, " super non-persistent");
5952 mddev->pers->status(seq, mddev);
5953 seq_printf(seq, "\n ");
5954 if (mddev->pers->sync_request) {
5955 if (mddev->curr_resync > 2) {
5956 status_resync(seq, mddev);
5957 seq_printf(seq, "\n ");
5958 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
5959 seq_printf(seq, "\tresync=DELAYED\n ");
5960 else if (mddev->recovery_cp < MaxSector)
5961 seq_printf(seq, "\tresync=PENDING\n ");
5964 seq_printf(seq, "\n ");
5966 if ((bitmap = mddev->bitmap)) {
5967 unsigned long chunk_kb;
5968 unsigned long flags;
5969 spin_lock_irqsave(&bitmap->lock, flags);
5970 chunk_kb = bitmap->chunksize >> 10;
5971 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
5973 bitmap->pages - bitmap->missing_pages,
5975 (bitmap->pages - bitmap->missing_pages)
5976 << (PAGE_SHIFT - 10),
5977 chunk_kb ? chunk_kb : bitmap->chunksize,
5978 chunk_kb ? "KB" : "B");
5980 seq_printf(seq, ", file: ");
5981 seq_path(seq, &bitmap->file->f_path, " \t\n");
5984 seq_printf(seq, "\n");
5985 spin_unlock_irqrestore(&bitmap->lock, flags);
5988 seq_printf(seq, "\n");
5990 mddev_unlock(mddev);
5995 static const struct seq_operations md_seq_ops = {
5996 .start = md_seq_start,
5997 .next = md_seq_next,
5998 .stop = md_seq_stop,
5999 .show = md_seq_show,
6002 static int md_seq_open(struct inode *inode, struct file *file)
6005 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
6009 error = seq_open(file, &md_seq_ops);
6013 struct seq_file *p = file->private_data;
6015 mi->event = atomic_read(&md_event_count);
6020 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
6022 struct seq_file *m = filp->private_data;
6023 struct mdstat_info *mi = m->private;
6026 poll_wait(filp, &md_event_waiters, wait);
6028 /* always allow read */
6029 mask = POLLIN | POLLRDNORM;
6031 if (mi->event != atomic_read(&md_event_count))
6032 mask |= POLLERR | POLLPRI;
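/*
 * Illustrative sketch, not part of this listing: waiting for an md
 * event from user space. A read records the current md_event_count in
 * mi->event (see md_seq_show()); the next poll() then reports
 * POLLERR|POLLPRI once the count moves. Error handling is elided.
 */
#if 0 /* example only */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static void example_wait_for_md_event(void)
{
	char buf[4096];
	struct pollfd pfd;

	pfd.fd = open("/proc/mdstat", O_RDONLY);
	pfd.events = POLLPRI;
	read(pfd.fd, buf, sizeof(buf));	/* arm: snapshot the event count */
	poll(&pfd, 1, -1);		/* wakes when md_event_count changes */
	close(pfd.fd);
}
#endif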
6036 static const struct file_operations md_seq_fops = {
6037 .owner = THIS_MODULE,
6038 .open = md_seq_open,
6040 .llseek = seq_lseek,
6041 .release = seq_release_private,
6042 .poll = mdstat_poll,
6045 int register_md_personality(struct mdk_personality *p)
6047 spin_lock(&pers_lock);
6048 list_add_tail(&p->list, &pers_list);
6049 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
6050 spin_unlock(&pers_lock);
6054 int unregister_md_personality(struct mdk_personality *p)
6056 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
6057 spin_lock(&pers_lock);
6058 list_del_init(&p->list);
6059 spin_unlock(&pers_lock);
6063 static int is_mddev_idle(mddev_t *mddev, int init)
6071 rdev_for_each_rcu(rdev, mddev) {
6072 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
6073 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
6074 (int)part_stat_read(&disk->part0, sectors[1]) -
6075 atomic_read(&disk->sync_io);
6076 /* sync IO will cause sync_io to increase before the disk_stats
6077 * as sync_io is counted when a request starts, and
6078 * disk_stats is counted when it completes.
6079 * So resync activity will cause curr_events to be smaller than
6080 * when there was no such activity.
6081 * non-sync IO will cause disk_stats to increase without
6082 * increasing sync_io so curr_events will (eventually)
6083 * be larger than it was before. Once it becomes
6084 * substantially larger, the test below will cause
6085 * the array to appear non-idle, and resync will slow
6086 * down.
6087 * If there is a lot of outstanding resync activity when
6088 * we set last_event to curr_events, then all that activity
6089 * completing might cause the array to appear non-idle
6090 * and resync will be slowed down even though there might
6091 * not have been non-resync activity. This will only
6092 * happen once though. 'last_events' will soon reflect
6093 * the state where there are few or no outstanding
6094 * resync requests, and further resync activity will
6095 * always make curr_events less than last_events.
6098 if (init || curr_events - rdev->last_events > 64) {
6099 rdev->last_events = curr_events;
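/*
 * Illustrative sketch, not part of this listing: the idleness test
 * above in isolation. A drift of more than 64 sectors past the
 * remembered baseline marks the disk (and hence the array) non-idle,
 * and the baseline is resynchronized on init and on every trip.
 */
#if 0 /* example only */
static int example_disk_idle(long curr_events, long *last_events, int init)
{
	if (init || curr_events - *last_events > 64) {
		*last_events = curr_events;
		return 0;	/* not idle (init also resets the baseline) */
	}
	return 1;		/* idle */
}
#endif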
6107 void md_done_sync(mddev_t *mddev, int blocks, int ok)
6109 /* another "blocks" (512byte) blocks have been synced */
6110 atomic_sub(blocks, &mddev->recovery_active);
6111 wake_up(&mddev->recovery_wait);
6113 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6114 md_wakeup_thread(mddev->thread);
6115 /* stop recovery, signal do_sync ... */
6120 /* md_write_start(mddev, bi)
6121 * If we need to update some array metadata (e.g. 'active' flag
6122 * in superblock) before writing, schedule a superblock update
6123 * and wait for it to complete.
6125 void md_write_start(mddev_t *mddev, struct bio *bi)
6128 if (bio_data_dir(bi) != WRITE)
6131 BUG_ON(mddev->ro == 1);
6132 if (mddev->ro == 2) {
6133 /* need to switch to read/write */
6135 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6136 md_wakeup_thread(mddev->thread);
6137 md_wakeup_thread(mddev->sync_thread);
6140 atomic_inc(&mddev->writes_pending);
6141 if (mddev->safemode == 1)
6142 mddev->safemode = 0;
6143 if (mddev->in_sync) {
6144 spin_lock_irq(&mddev->write_lock);
6145 if (mddev->in_sync) {
6147 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6148 md_wakeup_thread(mddev->thread);
6151 spin_unlock_irq(&mddev->write_lock);
6154 sysfs_notify_dirent(mddev->sysfs_state);
6155 wait_event(mddev->sb_wait,
6156 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
6157 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6160 void md_write_end(mddev_t *mddev)
6162 if (atomic_dec_and_test(&mddev->writes_pending)) {
6163 if (mddev->safemode == 2)
6164 md_wakeup_thread(mddev->thread);
6165 else if (mddev->safemode_delay)
6166 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
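/*
 * Illustrative sketch, not part of this listing: the bracketing a
 * personality is expected to do around writes (compare raid1).
 * md_write_start() runs before the write is issued and may block while
 * the superblock is marked active; md_write_end() is called when the
 * write completes, typically from the bi_end_io path. Names are
 * hypothetical.
 */
#if 0 /* example only */
static int example_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;

	md_write_start(mddev, bio);	/* no-op for reads */
	/*
	 * ... issue the bio to the member devices; the completion
	 * handler calls md_write_end(mddev) so the safemode timer can
	 * re-arm once the array goes quiet ...
	 */
	return 0;
}
#endif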
6170 /* md_allow_write(mddev)
6171 * Calling this ensures that the array is marked 'active' so that writes
6172 * may proceed without blocking. It is important to call this before
6173 * attempting a GFP_KERNEL allocation while holding the mddev lock.
6174 * Must be called with mddev_lock held.
6176 * In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock
6177 * is dropped, so return -EAGAIN after notifying userspace.
6179 int md_allow_write(mddev_t *mddev)
6185 if (!mddev->pers->sync_request)
6188 spin_lock_irq(&mddev->write_lock);
6189 if (mddev->in_sync) {
6191 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6192 if (mddev->safemode_delay &&
6193 mddev->safemode == 0)
6194 mddev->safemode = 1;
6195 spin_unlock_irq(&mddev->write_lock);
6196 md_update_sb(mddev, 0);
6197 sysfs_notify_dirent(mddev->sysfs_state);
6199 spin_unlock_irq(&mddev->write_lock);
6201 if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
6206 EXPORT_SYMBOL_GPL(md_allow_write);
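/*
 * Illustrative sketch, not part of this listing: the caller pattern
 * the comment above implies, used before a GFP_KERNEL allocation made
 * while holding the mddev lock (compare raid5's resize paths). Names
 * are hypothetical.
 */
#if 0 /* example only */
static void *example_alloc_under_lock(mddev_t *mddev, size_t size)
{
	int err = md_allow_write(mddev);

	if (err && err != -EAGAIN)
		return NULL;
	/* -EAGAIN: the metadata update was left to user-space; the
	 * allocation itself is still safe to attempt. */
	return kzalloc(size, GFP_KERNEL);
}
#endif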
6208 #define SYNC_MARKS 10
6209 #define SYNC_MARK_STEP (3*HZ)
6210 void md_do_sync(mddev_t *mddev)
6213 unsigned int currspeed = 0,
6215 sector_t max_sectors, j, io_sectors;
6216 unsigned long mark[SYNC_MARKS];
6217 sector_t mark_cnt[SYNC_MARKS];
6219 struct list_head *tmp;
6220 sector_t last_check;
6225 /* just in case thread restarts... */
6226 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
6228 if (mddev->ro) /* never try to sync a read-only array */
6231 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6232 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
6233 desc = "data-check";
6234 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6235 desc = "requested-resync";
6238 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6243 /* we overload curr_resync somewhat here.
6244 * 0 == not engaged in resync at all
6245 * 2 == checking that there is no conflict with another sync
6246 * 1 == like 2, but have yielded to allow conflicting resync to
6247 * pass
6248 * other == active in resync - this many blocks
6250 * Before starting a resync we must have set curr_resync to
6251 * 2, and then checked that every "conflicting" array has curr_resync
6252 * less than ours. When we find one that is the same or higher
6253 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
6254 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
6255 * This will mean we have to start checking from the beginning again.
6260 mddev->curr_resync = 2;
6263 if (kthread_should_stop()) {
6264 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6267 for_each_mddev(mddev2, tmp) {
6268 if (mddev2 == mddev)
6270 if (!mddev->parallel_resync
6271 && mddev2->curr_resync
6272 && match_mddev_units(mddev, mddev2)) {
6274 if (mddev < mddev2 && mddev->curr_resync == 2) {
6275 /* arbitrarily yield */
6276 mddev->curr_resync = 1;
6277 wake_up(&resync_wait);
6279 if (mddev > mddev2 && mddev->curr_resync == 1)
6280 /* no need to wait here, we can wait the next
6281 * time 'round when curr_resync == 2
6284 /* We need to wait 'interruptible' so as not to
6285 * contribute to the load average, and not to
6286 * be caught by 'softlockup'
6288 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
6289 if (!kthread_should_stop() &&
6290 mddev2->curr_resync >= mddev->curr_resync) {
6291 printk(KERN_INFO "md: delaying %s of %s"
6292 " until %s has finished (they"
6293 " share one or more physical units)\n",
6294 desc, mdname(mddev), mdname(mddev2));
6296 if (signal_pending(current))
6297 flush_signals(current);
6299 finish_wait(&resync_wait, &wq);
6302 finish_wait(&resync_wait, &wq);
6305 } while (mddev->curr_resync < 2);
6308 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6309 /* resync follows the size requested by the personality,
6310 * which defaults to physical size, but can be virtual size
6312 max_sectors = mddev->resync_max_sectors;
6313 mddev->resync_mismatches = 0;
6314 /* we don't use the checkpoint if there's a bitmap */
6315 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6316 j = mddev->resync_min;
6317 else if (!mddev->bitmap)
6318 j = mddev->recovery_cp;
6320 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6321 max_sectors = mddev->dev_sectors;
6323 /* recovery follows the physical size of devices */
6324 max_sectors = mddev->dev_sectors;
6326 list_for_each_entry(rdev, &mddev->disks, same_set)
6327 if (rdev->raid_disk >= 0 &&
6328 !test_bit(Faulty, &rdev->flags) &&
6329 !test_bit(In_sync, &rdev->flags) &&
6330 rdev->recovery_offset < j)
6331 j = rdev->recovery_offset;
6334 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
6335 printk(KERN_INFO "md: minimum _guaranteed_ speed:"
6336 " %d KB/sec/disk.\n", speed_min(mddev));
6337 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
6338 "(but not more than %d KB/sec) for %s.\n",
6339 speed_max(mddev), desc);
6341 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
6344 for (m = 0; m < SYNC_MARKS; m++) {
6346 mark_cnt[m] = io_sectors;
6349 mddev->resync_mark = mark[last_mark];
6350 mddev->resync_mark_cnt = mark_cnt[last_mark];
6353 * Tune reconstruction:
6355 window = 32*(PAGE_SIZE/512);
6356 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
6357 window/2,(unsigned long long) max_sectors/2);
6359 atomic_set(&mddev->recovery_active, 0);
6364 "md: resuming %s of %s from checkpoint.\n",
6365 desc, mdname(mddev));
6366 mddev->curr_resync = j;
6369 while (j < max_sectors) {
6374 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6375 ((mddev->curr_resync > mddev->curr_resync_completed &&
6376 (mddev->curr_resync - mddev->curr_resync_completed)
6377 > (max_sectors >> 4)) ||
6378 (j - mddev->curr_resync_completed)*2
6379 >= mddev->resync_max - mddev->curr_resync_completed
6381 /* time to update curr_resync_completed */
6382 blk_unplug(mddev->queue);
6383 wait_event(mddev->recovery_wait,
6384 atomic_read(&mddev->recovery_active) == 0);
6385 mddev->curr_resync_completed =
6387 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6388 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6391 while (j >= mddev->resync_max && !kthread_should_stop()) {
6392 /* As this condition is controlled by user-space,
6393 * we can block indefinitely, so use '_interruptible'
6394 * to avoid triggering warnings.
6396 flush_signals(current); /* just in case */
6397 wait_event_interruptible(mddev->recovery_wait,
6398 mddev->resync_max > j
6399 || kthread_should_stop());
6402 if (kthread_should_stop())
6405 sectors = mddev->pers->sync_request(mddev, j, &skipped,
6406 currspeed < speed_min(mddev));
6408 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6412 if (!skipped) { /* actual IO requested */
6413 io_sectors += sectors;
6414 atomic_add(sectors, &mddev->recovery_active);
6418 if (j>1) mddev->curr_resync = j;
6419 mddev->curr_mark_cnt = io_sectors;
6420 if (last_check == 0)
6421 /* this is the earliest that rebuild will be
6422 * visible in /proc/mdstat
6424 md_new_event(mddev);
6426 if (last_check + window > io_sectors || j == max_sectors)
6429 last_check = io_sectors;
6431 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6435 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
6437 int next = (last_mark+1) % SYNC_MARKS;
6439 mddev->resync_mark = mark[next];
6440 mddev->resync_mark_cnt = mark_cnt[next];
6441 mark[next] = jiffies;
6442 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
6447 if (kthread_should_stop())
6452 * this loop exits only when either we are slower than
6453 * the 'hard' speed limit, or the system was IO-idle for
6454 * a jiffy.
6455 * the system might be non-idle CPU-wise, but we only care
6456 * about not overloading the IO subsystem. (things like an
6457 * e2fsck being done on the RAID array should execute fast)
6459 blk_unplug(mddev->queue);
6462 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
6463 /((jiffies-mddev->resync_mark)/HZ +1) +1;
6465 if (currspeed > speed_min(mddev)) {
6466 if ((currspeed > speed_max(mddev)) ||
6467 !is_mddev_idle(mddev, 0)) {
6473 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
6475 * this also signals 'finished resyncing' to md_stop
6478 blk_unplug(mddev->queue);
6480 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
6482 /* tell personality that we are finished */
6483 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
6485 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
6486 mddev->curr_resync > 2) {
6487 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6488 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6489 if (mddev->curr_resync >= mddev->recovery_cp) {
6491 "md: checkpointing %s of %s.\n",
6492 desc, mdname(mddev));
6493 mddev->recovery_cp = mddev->curr_resync;
6496 mddev->recovery_cp = MaxSector;
6498 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6499 mddev->curr_resync = MaxSector;
6500 list_for_each_entry(rdev, &mddev->disks, same_set)
6501 if (rdev->raid_disk >= 0 &&
6502 !test_bit(Faulty, &rdev->flags) &&
6503 !test_bit(In_sync, &rdev->flags) &&
6504 rdev->recovery_offset < mddev->curr_resync)
6505 rdev->recovery_offset = mddev->curr_resync;
6508 set_bit(MD_CHANGE_DEVS, &mddev->flags);
6511 mddev->curr_resync = 0;
6512 mddev->curr_resync_completed = 0;
6513 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6514 /* We completed, so the max setting can be forgotten. */
6515 mddev->resync_max = MaxSector;
6516 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6517 wake_up(&resync_wait);
6518 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
6519 md_wakeup_thread(mddev->thread);
6524 * got a signal, exit.
6527 "md: md_do_sync() got signal ... exiting\n");
6528 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6532 EXPORT_SYMBOL_GPL(md_do_sync);
6535 static int remove_and_add_spares(mddev_t *mddev)
6540 mddev->curr_resync_completed = 0;
6542 list_for_each_entry(rdev, &mddev->disks, same_set)
6543 if (rdev->raid_disk >= 0 &&
6544 !test_bit(Blocked, &rdev->flags) &&
6545 (test_bit(Faulty, &rdev->flags) ||
6546 ! test_bit(In_sync, &rdev->flags)) &&
6547 atomic_read(&rdev->nr_pending)==0) {
6548 if (mddev->pers->hot_remove_disk(
6549 mddev, rdev->raid_disk)==0) {
6551 sprintf(nm,"rd%d", rdev->raid_disk);
6552 sysfs_remove_link(&mddev->kobj, nm);
6553 rdev->raid_disk = -1;
6557 if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
6558 list_for_each_entry(rdev, &mddev->disks, same_set) {
6559 if (rdev->raid_disk >= 0 &&
6560 !test_bit(In_sync, &rdev->flags) &&
6561 !test_bit(Blocked, &rdev->flags))
6563 if (rdev->raid_disk < 0
6564 && !test_bit(Faulty, &rdev->flags)) {
6565 rdev->recovery_offset = 0;
6567 hot_add_disk(mddev, rdev) == 0) {
6569 sprintf(nm, "rd%d", rdev->raid_disk);
6570 if (sysfs_create_link(&mddev->kobj,
6573 "md: cannot register "
6577 md_new_event(mddev);
6586 * This routine is regularly called by all per-raid-array threads to
6587 * deal with generic issues like resync and super-block update.
6588 * Raid personalities that don't have a thread (linear/raid0) do not
6589 * need this as they never do any recovery or update the superblock.
6591 * It does not do any resync itself, but rather "forks" off other threads
6592 * to do that as needed.
6593 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
6594 * "->recovery" and create a thread at ->sync_thread.
6595 * When the thread finishes it sets MD_RECOVERY_DONE
6596 * and wakes up this thread, which will reap the thread and finish up.
6597 * This thread also removes any faulty devices (with nr_pending == 0).
6599 * The overall approach is:
6600 * 1/ if the superblock needs updating, update it.
6601 * 2/ If a recovery thread is running, don't do anything else.
6602 * 3/ If recovery has finished, clean up, possibly marking spares active.
6603 * 4/ If there are any faulty devices, remove them.
6604 * 5/ If array is degraded, try to add spare devices
6605 * 6/ If array has spares or is not in-sync, start a resync thread.
6607 void md_check_recovery(mddev_t *mddev)
6613 bitmap_daemon_work(mddev->bitmap);
6618 if (signal_pending(current)) {
6619 if (mddev->pers->sync_request && !mddev->external) {
6620 printk(KERN_INFO "md: %s in immediate safe mode\n",
6622 mddev->safemode = 2;
6624 flush_signals(current);
6627 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
6630 (mddev->flags && !mddev->external) ||
6631 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
6632 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
6633 (mddev->external == 0 && mddev->safemode == 1) ||
6634 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
6635 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
6639 if (mddev_trylock(mddev)) {
6643 /* Only thing we do on a ro array is remove
6644 * failed devices. */
6646 remove_and_add_spares(mddev);
6647 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6651 if (!mddev->external) {
6653 spin_lock_irq(&mddev->write_lock);
6654 if (mddev->safemode &&
6655 !atomic_read(&mddev->writes_pending) &&
6657 mddev->recovery_cp == MaxSector) {
6660 if (mddev->persistent)
6661 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6663 if (mddev->safemode == 1)
6664 mddev->safemode = 0;
6665 spin_unlock_irq(&mddev->write_lock);
6667 sysfs_notify_dirent(mddev->sysfs_state);
6671 md_update_sb(mddev, 0);
6673 list_for_each_entry(rdev, &mddev->disks, same_set)
6674 if (test_and_clear_bit(StateChanged, &rdev->flags))
6675 sysfs_notify_dirent(rdev->sysfs_state);
6678 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
6679 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
6680 /* resync/recovery still happening */
6681 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6684 if (mddev->sync_thread) {
6685 /* resync has finished, collect result */
6686 md_unregister_thread(mddev->sync_thread);
6687 mddev->sync_thread = NULL;
6688 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
6689 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
6691 /* activate any spares */
6692 if (mddev->pers->spare_active(mddev))
6693 sysfs_notify(&mddev->kobj, NULL,
6696 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6697 mddev->pers->finish_reshape)
6698 mddev->pers->finish_reshape(mddev);
6699 md_update_sb(mddev, 1);
6701 /* if array is no longer degraded, then any saved_raid_disk
6702 * information must be scrapped
6704 if (!mddev->degraded)
6705 list_for_each_entry(rdev, &mddev->disks, same_set)
6706 rdev->saved_raid_disk = -1;
6708 mddev->recovery = 0;
6709 /* flag recovery needed just to double check */
6710 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6711 sysfs_notify_dirent(mddev->sysfs_action);
6712 md_new_event(mddev);
6715 /* Set RUNNING before clearing NEEDED to avoid
6716 * any transients in the value of "sync_action".
6718 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6719 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6720 /* Clear some bits that don't mean anything, but
6721 * might be left set */
6723 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
6724 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
6726 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
6728 /* no recovery is running.
6729 * remove any failed drives, then
6730 * add spares if possible.
6731 * Spares are also removed and re-added, to allow
6732 * the personality to fail the re-add.
6735 if (mddev->reshape_position != MaxSector) {
6736 if (mddev->pers->check_reshape == NULL ||
6737 mddev->pers->check_reshape(mddev) != 0)
6738 /* Cannot proceed */
6740 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6741 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6742 } else if ((spares = remove_and_add_spares(mddev))) {
6743 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6744 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6745 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
6746 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6747 } else if (mddev->recovery_cp < MaxSector) {
6748 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6749 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6750 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6751 /* nothing to be done ... */
6754 if (mddev->pers->sync_request) {
6755 if (spares && mddev->bitmap && ! mddev->bitmap->file) {
6756 /* We are adding a device or devices to an array
6757 * which has the bitmap stored on all devices.
6758 * So make sure all bitmap pages get written
6760 bitmap_write_all(mddev->bitmap);
6762 mddev->sync_thread = md_register_thread(md_do_sync,
6765 if (!mddev->sync_thread) {
6766 printk(KERN_ERR "%s: could not start resync"
6769 /* leave the spares where they are, it shouldn't hurt */
6770 mddev->recovery = 0;
6772 md_wakeup_thread(mddev->sync_thread);
6773 sysfs_notify_dirent(mddev->sysfs_action);
6774 md_new_event(mddev);
6777 if (!mddev->sync_thread) {
6778 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6779 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
6781 if (mddev->sysfs_action)
6782 sysfs_notify_dirent(mddev->sysfs_action);
6784 mddev_unlock(mddev);
6788 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
6790 sysfs_notify_dirent(rdev->sysfs_state);
6791 wait_event_timeout(rdev->blocked_wait,
6792 !test_bit(Blocked, &rdev->flags),
6793 msecs_to_jiffies(5000));
6794 rdev_dec_pending(rdev, mddev);
6796 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
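/*
 * Illustrative sketch, not part of this listing: callers are expected
 * to hold a reference before sleeping here, because the wait path
 * above ends in rdev_dec_pending() (compare raid5's handling of
 * Blocked rdevs).
 */
#if 0 /* example only */
static void example_wait_if_blocked(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (test_bit(Blocked, &rdev->flags)) {
		atomic_inc(&rdev->nr_pending);	/* dropped by rdev_dec_pending() */
		md_wait_for_blocked_rdev(rdev, mddev);	/* sleeps up to 5s */
	}
}
#endif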
6798 static int md_notify_reboot(struct notifier_block *this,
6799 unsigned long code, void *x)
6801 struct list_head *tmp;
6804 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
6806 printk(KERN_INFO "md: stopping all md devices.\n");
6808 for_each_mddev(mddev, tmp)
6809 if (mddev_trylock(mddev)) {
6810 /* Force a switch to readonly even if the array
6811 * appears to still be in use. Hence
6812 * the '100'. */
6814 do_md_stop(mddev, 1, 100);
6815 mddev_unlock(mddev);
6818 * certain more exotic SCSI devices are known to be
6819 * volatile with respect to overly early system reboots. While the
6820 * right place to handle this issue is the given
6821 * driver, we do want to have a safe RAID driver ...
6828 static struct notifier_block md_notifier = {
6829 .notifier_call = md_notify_reboot,
6831 .priority = INT_MAX, /* before any real devices */
6834 static void md_geninit(void)
6836 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
6838 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
6841 static int __init md_init(void)
6843 if (register_blkdev(MD_MAJOR, "md"))
6845 if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
6846 unregister_blkdev(MD_MAJOR, "md");
6849 blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
6850 md_probe, NULL, NULL);
6851 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
6852 md_probe, NULL, NULL);
6854 register_reboot_notifier(&md_notifier);
6855 raid_table_header = register_sysctl_table(raid_root_table);
6865 * Searches all registered partitions for autorun RAID arrays
6869 static LIST_HEAD(all_detected_devices);
6870 struct detected_devices_node {
6871 struct list_head list;
6875 void md_autodetect_dev(dev_t dev)
6877 struct detected_devices_node *node_detected_dev;
6879 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
6880 if (node_detected_dev) {
6881 node_detected_dev->dev = dev;
6882 list_add_tail(&node_detected_dev->list, &all_detected_devices);
6884 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
6885 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
6890 static void autostart_arrays(int part)
6893 struct detected_devices_node *node_detected_dev;
6895 int i_scanned, i_passed;
6900 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
6902 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
6904 node_detected_dev = list_entry(all_detected_devices.next,
6905 struct detected_devices_node, list);
6906 list_del(&node_detected_dev->list);
6907 dev = node_detected_dev->dev;
6908 kfree(node_detected_dev);
6909 rdev = md_import_device(dev,0, 90);
6913 if (test_bit(Faulty, &rdev->flags)) {
6917 set_bit(AutoDetected, &rdev->flags);
6918 list_add(&rdev->same_set, &pending_raid_disks);
6922 printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
6923 i_scanned, i_passed);
6925 autorun_devices(part);
6928 #endif /* !MODULE */
6930 static __exit void md_exit(void)
6933 struct list_head *tmp;
6935 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
6936 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
6938 unregister_blkdev(MD_MAJOR,"md");
6939 unregister_blkdev(mdp_major, "mdp");
6940 unregister_reboot_notifier(&md_notifier);
6941 unregister_sysctl_table(raid_table_header);
6942 remove_proc_entry("mdstat", NULL);
6943 for_each_mddev(mddev, tmp) {
6944 export_array(mddev);
6945 mddev->hold_active = 0;
6949 subsys_initcall(md_init);
6950 module_exit(md_exit)
6952 static int get_ro(char *buffer, struct kernel_param *kp)
6954 return sprintf(buffer, "%d", start_readonly);
6956 static int set_ro(const char *val, struct kernel_param *kp)
6959 int num = simple_strtoul(val, &e, 10);
6960 if (*val && (*e == '\0' || *e == '\n')) {
6961 start_readonly = num;
6967 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
6968 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
6970 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
6972 EXPORT_SYMBOL(register_md_personality);
6973 EXPORT_SYMBOL(unregister_md_personality);
6974 EXPORT_SYMBOL(md_error);
6975 EXPORT_SYMBOL(md_done_sync);
6976 EXPORT_SYMBOL(md_write_start);
6977 EXPORT_SYMBOL(md_write_end);
6978 EXPORT_SYMBOL(md_register_thread);
6979 EXPORT_SYMBOL(md_unregister_thread);
6980 EXPORT_SYMBOL(md_wakeup_thread);
6981 EXPORT_SYMBOL(md_check_recovery);
6982 MODULE_LICENSE("GPL");
6984 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);